Update to a V8 version with partial snapshot support. This is taken from the partial_snapshot branch of V8.
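
Highlights of this drop:
- Android build: ENABLE_V8_SNAPSHOT is temporarily forced to true for the
  snapshot experiment, and -DENABLE_DEBUGGER_SUPPORT is dropped from
  Android.libv8.mk.
- SCons build: 'mips' is added to the 'arch' and 'simulator' options and the
  mips/ sources are wired into src/SConscript. A MIPS simulator build should
  then be selectable with an invocation along the lines of

      scons arch=mips simulator=mips

  (illustrative only; regexp is forced to 'interpreted' for mips in any case).
- Natives: the *-delay.js files are renamed to date.js, regexp.js, json.js,
  mirror-debugger.js and debug-debugger.js.
- ARM: ARMv7/VFP3 feature flags, a ubfx instruction, and new StringAdd and
  SubString stubs that replace the corresponding runtime calls in
  codegen-arm.cc; CodeGenerator now takes a CompilationInfo.
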
diff --git a/AUTHORS b/AUTHORS
index 5d712fc..9128ba3 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -4,6 +4,7 @@
 #   Name/Organization <email address>
 
 Google Inc.
+Sigma Designs Inc.
 
 Alexander Botero-Lowry <alexbl@FreeBSD.org>
 Alexandre Vassalotti <avassalotti@gmail.com>
diff --git a/Android.libv8.mk b/Android.libv8.mk
index ecaf762..ada8904 100644
--- a/Android.libv8.mk
+++ b/Android.libv8.mk
@@ -64,8 +64,7 @@
 	-Wno-endif-labels \
 	-Wno-import \
 	-Wno-format \
-	-fno-exceptions \
-	-DENABLE_DEBUGGER_SUPPORT
+	-fno-exceptions
 
 ifeq ($(TARGET_ARCH),arm)
 	LOCAL_CFLAGS += -DARM -DV8_TARGET_ARCH_ARM
diff --git a/Android.mk b/Android.mk
index 37c79ff..925136c 100644
--- a/Android.mk
+++ b/Android.mk
@@ -40,6 +40,9 @@
 
 ifeq ($(NEED_V8),true)
   # Build libv8 and v8shell
+  # Temporarily enable snapshot support.
+  # TODO(andreip): re-enable this after the experiment
+  ENABLE_V8_SNAPSHOT = true
   ifeq ($(ENABLE_V8_SNAPSHOT),true)
     include $(BASE_PATH)/Android.mksnapshot.mk
   endif
diff --git a/Android.v8common.mk b/Android.v8common.mk
index 42ca22f..06e397e 100644
--- a/Android.v8common.mk
+++ b/Android.v8common.mk
@@ -33,6 +33,7 @@
 	src/handles.cc \
 	src/hashmap.cc \
 	src/heap.cc \
+	src/heap-profiler.cc \
 	src/ic.cc \
 	src/interpreter-irregexp.cc \
 	src/jsregexp.cc \
@@ -130,9 +131,7 @@
 	src/math.js \
 	src/messages.js \
 	src/apinatives.js \
-	src/date-delay.js \
-	src/regexp-delay.js \
-	src/json-delay.js \
-	src/mirror-delay.js \
-	src/debug-delay.js \
+	src/date.js \
+	src/regexp.js \
+	src/json.js \
 	src/macros.py
diff --git a/SConstruct b/SConstruct
index c9c5a55..0b03803 100644
--- a/SConstruct
+++ b/SConstruct
@@ -191,6 +191,17 @@
     'armvariant:arm': {
       'CPPDEFINES':   ['V8_ARM_VARIANT_ARM']
     },
+    'arch:mips': {
+      'CPPDEFINES':   ['V8_TARGET_ARCH_MIPS'],
+      'simulator:none': {
+        'CCFLAGS':      ['-EL', '-mips32r2', '-Wa,-mips32r2', '-fno-inline'],
+        'LDFLAGS':      ['-EL']
+      }
+    },
+    'simulator:mips': {
+      'CCFLAGS':      ['-m32'],
+      'LINKFLAGS':    ['-m32']
+    },
     'arch:x64': {
       'CPPDEFINES':   ['V8_TARGET_ARCH_X64'],
       'CCFLAGS':      ['-m64'],
@@ -293,6 +304,9 @@
       # used by the arm simulator.
       'WARNINGFLAGS': ['/wd4996']
     },
+    'arch:mips': {
+      'CPPDEFINES':   ['V8_TARGET_ARCH_MIPS'],
+    },
     'disassembler:on': {
       'CPPDEFINES':   ['ENABLE_DISASSEMBLER']
     }
@@ -458,10 +472,22 @@
       'CCFLAGS':      ['-m64'],
       'LINKFLAGS':    ['-m64']
     },
+    'arch:mips': {
+      'CPPDEFINES':   ['V8_TARGET_ARCH_MIPS'],
+      'simulator:none': {
+        'CCFLAGS':      ['-EL', '-mips32r2', '-Wa,-mips32r2', '-fno-inline'],
+        'LINKFLAGS':    ['-EL'],
+        'LDFLAGS':      ['-EL']
+      }
+    },
     'simulator:arm': {
       'CCFLAGS':      ['-m32'],
       'LINKFLAGS':    ['-m32']
     },
+    'simulator:mips': {
+      'CCFLAGS':      ['-m32'],
+      'LINKFLAGS':    ['-m32']
+    },
     'mode:release': {
       'CCFLAGS':      ['-O2']
     },
@@ -602,7 +628,7 @@
     'help': 'the os to build for (' + OS_GUESS + ')'
   },
   'arch': {
-    'values':['arm', 'ia32', 'x64'],
+    'values':['arm', 'ia32', 'x64', 'mips'],
     'default': ARCH_GUESS,
     'help': 'the architecture to build for (' + ARCH_GUESS + ')'
   },
@@ -652,7 +678,7 @@
     'help': 'use Microsoft Visual C++ link-time code generation'
   },
   'simulator': {
-    'values': ['arm', 'none'],
+    'values': ['arm', 'mips', 'none'],
     'default': 'none',
     'help': 'build with simulator'
   },
@@ -872,6 +898,11 @@
     options['armvariant'] = 'arm'
   if (options['armvariant'] != 'none' and options['arch'] != 'arm'):
     options['armvariant'] = 'none'
+  if options['arch'] == 'mips':
+    if ('regexp' in ARGUMENTS) and options['regexp'] == 'native':
+      # Print a warning if native regexp is specified for mips
+      print "Warning: forcing regexp to interpreted for mips"
+    options['regexp'] = 'interpreted'
 
 
 def ParseEnvOverrides(arg, imports):
diff --git a/include/v8.h b/include/v8.h
index 6125286..19a41f4 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -2587,7 +2587,7 @@
 
   /** Creates a new context. */
   static Persistent<Context> New(
-      ExtensionConfiguration* extensions = 0,
+      ExtensionConfiguration* extensions = NULL,
       Handle<ObjectTemplate> global_template = Handle<ObjectTemplate>(),
       Handle<Value> global_object = Handle<Value>());
 
diff --git a/src/SConscript b/src/SConscript
index ebda77a..864b4e7 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -131,6 +131,24 @@
   'armvariant:thumb2': Split("""
     arm/assembler-thumb2.cc
     """),
+  'arch:mips': Split("""
+    mips/assembler-mips.cc
+    mips/builtins-mips.cc
+    mips/codegen-mips.cc
+    mips/constants-mips.cc
+    mips/cpu-mips.cc
+    mips/debug-mips.cc
+    mips/disasm-mips.cc
+    mips/fast-codegen-mips.cc
+    mips/full-codegen-mips.cc
+    mips/frames-mips.cc
+    mips/ic-mips.cc
+    mips/jump-target-mips.cc
+    mips/macro-assembler-mips.cc
+    mips/register-allocator-mips.cc
+    mips/stub-cache-mips.cc
+    mips/virtual-frame-mips.cc
+    """),
   'arch:ia32': Split("""
     ia32/assembler-ia32.cc
     ia32/builtins-ia32.cc
@@ -168,6 +186,7 @@
     x64/virtual-frame-x64.cc
     """),
   'simulator:arm': ['arm/simulator-arm.cc'],
+  'simulator:mips': ['mips/simulator-mips.cc'],
   'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
   'os:openbsd': ['platform-openbsd.cc', 'platform-posix.cc'],
   'os:linux':   ['platform-linux.cc', 'platform-posix.cc'],
@@ -226,11 +245,11 @@
 math.js
 messages.js
 apinatives.js
-debug-delay.js
-mirror-delay.js
-date-delay.js
-regexp-delay.js
-json-delay.js
+date.js
+regexp.js
+json.js
+mirror-debugger.js
+debug-debugger.js
 '''.split()
 
 
diff --git a/src/api.cc b/src/api.cc
index 322c90f..a949e6f 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -34,6 +34,7 @@
 #include "debug.h"
 #include "execution.h"
 #include "global-handles.h"
+#include "globals.h"
 #include "platform.h"
 #include "serialize.h"
 #include "snapshot.h"
@@ -1134,12 +1135,14 @@
   if (pre_data != NULL && !pre_data->SanityCheck()) {
     pre_data = NULL;
   }
-  i::Handle<i::JSFunction> boilerplate = i::Compiler::Compile(str,
-                                                              name_obj,
-                                                              line_offset,
-                                                              column_offset,
-                                                              NULL,
-                                                              pre_data);
+  i::Handle<i::JSFunction> boilerplate =
+      i::Compiler::Compile(str,
+                           name_obj,
+                           line_offset,
+                           column_offset,
+                           NULL,
+                           pre_data,
+                           i::NOT_NATIVES_CODE);
   has_pending_exception = boilerplate.is_null();
   EXCEPTION_BAILOUT_CHECK(Local<Script>());
   return Local<Script>(ToApi<Script>(boilerplate));
@@ -2739,17 +2742,18 @@
   LOG_API("Context::New");
   ON_BAILOUT("v8::Context::New()", return Persistent<Context>());
 
+#if defined(ANDROID)
+  // On mobile devices a full GC is expensive, so leave it to the system to
+  // decide when to perform a full GC.
+#else
+  // Give the heap a chance to cleanup if we've disposed contexts.
+  i::Heap::CollectAllGarbageIfContextDisposed();
+#endif
+
   // Enter V8 via an ENTER_V8 scope.
   i::Handle<i::Context> env;
   {
     ENTER_V8;
-#if defined(ANDROID)
-    // On mobile device, full GC is expensive, leave it to the system to
-    // decide when should make a full GC.
-#else
-    // Give the heap a chance to cleanup if we've disposed contexts.
-    i::Heap::CollectAllGarbageIfContextDisposed();
-#endif
     v8::Handle<ObjectTemplate> proxy_template = global_template;
     i::Handle<i::FunctionTemplateInfo> proxy_constructor;
     i::Handle<i::FunctionTemplateInfo> global_constructor;
diff --git a/src/apinatives.js b/src/apinatives.js
index 6451e62..ca2bbf5 100644
--- a/src/apinatives.js
+++ b/src/apinatives.js
@@ -31,7 +31,7 @@
 
 
 function CreateDate(time) {
-  var date = new ORIGINAL_DATE();
+  var date = new $Date();
   date.setTime(time);
   return date;
 }
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 74547be..cf167f0 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -51,12 +51,18 @@
   // If the compiler is allowed to use vfp then we can use vfp too in our
   // code generation.
 #if !defined(__arm__)
-  // For the simulator=arm build, always use VFP since the arm simulator has
-  // VFP support.
-  supported_ |= 1u << VFP3;
+  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
+  if (FLAG_enable_vfp3) {
+    supported_ |= 1u << VFP3;
+  }
+  // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled.
+  if (FLAG_enable_armv7) {
+    supported_ |= 1u << ARMv7;
+  }
 #else
   if (Serializer::enabled()) {
-    supported_ |= OS::CpuFeaturesImpliedByPlatform();
+    supported_ |= 1u << VFP3;
+    // supported_ |= OS::CpuFeaturesImpliedByPlatform();
     return;  // No features if we might serialize.
   }
 
@@ -66,6 +72,11 @@
     supported_ |= 1u << VFP3;
     found_by_runtime_probing_ |= 1u << VFP3;
   }
+
+  if (OS::ArmCpuHasFeature(ARMv7)) {
+    supported_ |= 1u << ARMv7;
+    found_by_runtime_probing_ |= 1u << ARMv7;
+  }
 #endif
 }
 
@@ -83,9 +94,9 @@
 Register r5  = {  5 };
 Register r6  = {  6 };
 Register r7  = {  7 };
-Register r8  = {  8 };
+Register r8  = {  8 };  // Used as context register.
 Register r9  = {  9 };
-Register r10 = { 10 };
+Register r10 = { 10 };  // Used as roots register.
 Register fp  = { 11 };
 Register ip  = { 12 };
 Register sp  = { 13 };
@@ -264,9 +275,9 @@
 
 
 // -----------------------------------------------------------------------------
-// Implementation of Assembler
+// Implementation of Assembler.
 
-// Instruction encoding bits
+// Instruction encoding bits.
 enum {
   H   = 1 << 5,   // halfword (or byte)
   S6  = 1 << 6,   // signed (or unsigned)
@@ -299,14 +310,14 @@
   B26 = 1 << 26,
   B27 = 1 << 27,
 
-  // Instruction bit masks
+  // Instruction bit masks.
   RdMask     = 15 << 12,  // in str instruction
   CondMask   = 15 << 28,
   CoprocessorMask = 15 << 8,
   OpCodeMask = 15 << 21,  // in data-processing instructions
   Imm24Mask  = (1 << 24) - 1,
   Off12Mask  = (1 << 12) - 1,
-  // Reserved condition
+  // Reserved condition.
   nv = 15 << 28
 };
 
@@ -327,13 +338,13 @@
 // ldr pc, [pc, #XXX]
 const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;
 
-// spare_buffer_
+// Spare buffer.
 static const int kMinimalBufferSize = 4*KB;
 static byte* spare_buffer_ = NULL;
 
 Assembler::Assembler(void* buffer, int buffer_size) {
   if (buffer == NULL) {
-    // do our own buffer management
+    // Do our own buffer management.
     if (buffer_size <= kMinimalBufferSize) {
       buffer_size = kMinimalBufferSize;
 
@@ -351,14 +362,14 @@
     own_buffer_ = true;
 
   } else {
-    // use externally provided buffer instead
+    // Use externally provided buffer instead.
     ASSERT(buffer_size > 0);
     buffer_ = static_cast<byte*>(buffer);
     buffer_size_ = buffer_size;
     own_buffer_ = false;
   }
 
-  // setup buffer pointers
+  // Setup buffer pointers.
   ASSERT(buffer_ != NULL);
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -386,11 +397,11 @@
 
 
 void Assembler::GetCode(CodeDesc* desc) {
-  // emit constant pool if necessary
+  // Emit constant pool if necessary.
   CheckConstPool(true, false);
   ASSERT(num_prinfo_ == 0);
 
-  // setup desc
+  // Setup code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
   desc->instr_size = pc_offset();
@@ -539,7 +550,7 @@
 void Assembler::link_to(Label* L, Label* appendix) {
   if (appendix->is_linked()) {
     if (L->is_linked()) {
-      // append appendix to L's list
+      // Append appendix to L's list.
       int fixup_pos;
       int link = L->pos();
       do {
@@ -549,7 +560,7 @@
       ASSERT(link == kEndOfChain);
       target_at_put(fixup_pos, appendix->pos());
     } else {
-      // L is empty, simply use appendix
+      // L is empty, simply use appendix.
       *L = *appendix;
     }
   }
@@ -575,12 +586,12 @@
 }
 
 
-// Low-level code emission routines depending on the addressing mode
+// Low-level code emission routines depending on the addressing mode.
 static bool fits_shifter(uint32_t imm32,
                          uint32_t* rotate_imm,
                          uint32_t* immed_8,
                          Instr* instr) {
-  // imm32 must be unsigned
+  // imm32 must be unsigned.
   for (int rot = 0; rot < 16; rot++) {
     uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
     if ((imm8 <= 0xff)) {
@@ -589,7 +600,7 @@
       return true;
     }
   }
-  // if the opcode is mov or mvn and if ~imm32 fits, change the opcode
+  // If the opcode is mov or mvn and if ~imm32 fits, change the opcode.
   if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
     if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
       *instr ^= 0x2*B21;
@@ -626,7 +637,7 @@
   CheckBuffer();
   ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
   if (!x.rm_.is_valid()) {
-    // immediate
+    // Immediate.
     uint32_t rotate_imm;
     uint32_t immed_8;
     if (MustUseIp(x.rmode_) ||
@@ -634,7 +645,7 @@
       // The immediate operand cannot be encoded as a shifter operand, so load
       // it first to register ip and change the original instruction to use ip.
       // However, if the original instruction is a 'mov rd, x' (not setting the
-      // condition code), then replace it with a 'ldr rd, [pc]'
+      // condition code), then replace it with a 'ldr rd, [pc]'.
       RecordRelocInfo(x.rmode_, x.imm32_);
       CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
       Condition cond = static_cast<Condition>(instr & CondMask);
@@ -648,16 +659,16 @@
     }
     instr |= I | rotate_imm*B8 | immed_8;
   } else if (!x.rs_.is_valid()) {
-    // immediate shift
+    // Immediate shift.
     instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
   } else {
-    // register shift
+    // Register shift.
     ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
     instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
   }
   emit(instr | rn.code()*B16 | rd.code()*B12);
   if (rn.is(pc) || x.rm_.is(pc))
-    // block constant pool emission for one instruction after reading pc
+    // Block constant pool emission for one instruction after reading pc.
     BlockConstPoolBefore(pc_offset() + kInstrSize);
 }
 
@@ -666,15 +677,15 @@
   ASSERT((instr & ~(CondMask | B | L)) == B26);
   int am = x.am_;
   if (!x.rm_.is_valid()) {
-    // immediate offset
+    // Immediate offset.
     int offset_12 = x.offset_;
     if (offset_12 < 0) {
       offset_12 = -offset_12;
       am ^= U;
     }
     if (!is_uint12(offset_12)) {
-      // immediate offset cannot be encoded, load it first to register ip
-      // rn (and rd in a load) should never be ip, or will be trashed
+      // Immediate offset cannot be encoded, load it first to register ip
+      // rn (and rd in a load) should never be ip, or will be trashed.
       ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
       mov(ip, Operand(x.offset_), LeaveCC,
           static_cast<Condition>(instr & CondMask));
@@ -684,9 +695,9 @@
     ASSERT(offset_12 >= 0);  // no masking needed
     instr |= offset_12;
   } else {
-    // register offset (shift_imm_ and shift_op_ are 0) or scaled
+    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
     // register offset the constructors make sure than both shift_imm_
-    // and shift_op_ are initialized
+    // and shift_op_ are initialized.
     ASSERT(!x.rm_.is(pc));
     instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
   }
@@ -700,15 +711,15 @@
   ASSERT(x.rn_.is_valid());
   int am = x.am_;
   if (!x.rm_.is_valid()) {
-    // immediate offset
+    // Immediate offset.
     int offset_8 = x.offset_;
     if (offset_8 < 0) {
       offset_8 = -offset_8;
       am ^= U;
     }
     if (!is_uint8(offset_8)) {
-      // immediate offset cannot be encoded, load it first to register ip
-      // rn (and rd in a load) should never be ip, or will be trashed
+      // Immediate offset cannot be encoded, load it first to register ip
+      // rn (and rd in a load) should never be ip, or will be trashed.
       ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
       mov(ip, Operand(x.offset_), LeaveCC,
           static_cast<Condition>(instr & CondMask));
@@ -718,15 +729,15 @@
     ASSERT(offset_8 >= 0);  // no masking needed
     instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
   } else if (x.shift_imm_ != 0) {
-    // scaled register offset not supported, load index first
-    // rn (and rd in a load) should never be ip, or will be trashed
+    // Scaled register offset not supported, load index first
+    // rn (and rd in a load) should never be ip, or will be trashed.
     ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
     mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
         static_cast<Condition>(instr & CondMask));
     addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
     return;
   } else {
-    // register offset
+    // Register offset.
     ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
     instr |= x.rm_.code();
   }
@@ -744,7 +755,7 @@
 
 
 void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
-  // unindexed addressing is not encoded by this function
+  // Unindexed addressing is not encoded by this function.
   ASSERT_EQ((B27 | B26),
             (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
   ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
@@ -759,7 +770,7 @@
   ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
   ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
 
-  // post-indexed addressing requires W == 1; different than in addrmod2/3
+  // Post-indexed addressing requires W == 1; different than in addrmod2/3.
   if ((am & P) == 0)
     am |= W;
 
@@ -782,7 +793,7 @@
   }
 
   // Block the emission of the constant pool, since the branch instruction must
-  // be emitted at the pc offset recorded by the label
+  // be emitted at the pc offset recorded by the label.
   BlockConstPoolBefore(pc_offset() + kInstrSize);
   return target_pos - (pc_offset() + kPcLoadDelta);
 }
@@ -804,7 +815,7 @@
 }
 
 
-// Branch instructions
+// Branch instructions.
 void Assembler::b(int branch_offset, Condition cond) {
   ASSERT((branch_offset & 3) == 0);
   int imm24 = branch_offset >> 2;
@@ -812,7 +823,7 @@
   emit(cond | B27 | B25 | (imm24 & Imm24Mask));
 
   if (cond == al)
-    // dead code is a good location to emit the constant pool
+    // Dead code is a good location to emit the constant pool.
     CheckConstPool(false, false);
 }
 
@@ -849,7 +860,22 @@
 }
 
 
-// Data-processing instructions
+// Data-processing instructions.
+
+// UBFX <Rd>,<Rn>,#<lsb>,#<width - 1>
+// Instruction details available in ARM DDI 0406A, A8-464.
+// cond(31-28) | 01111(27-23)| 1(22) | 1(21) | widthm1(20-16) |
+//  Rd(15-12) | lsb(11-7) | 101(6-4) | Rn(3-0)
+void Assembler::ubfx(Register dst, Register src1, const Operand& src2,
+                     const Operand& src3, Condition cond) {
+  ASSERT(!src2.rm_.is_valid() && !src3.rm_.is_valid());
+  ASSERT(static_cast<uint32_t>(src2.imm32_) <= 0x1f);
+  ASSERT(static_cast<uint32_t>(src3.imm32_) <= 0x1f);
+  emit(cond | 0x3F*B21 | src3.imm32_*B16 |
+       dst.code()*B12 | src2.imm32_*B7 | 0x5*B4 | src1.code());
+}
+
+
 void Assembler::and_(Register dst, Register src1, const Operand& src2,
                      SBit s, Condition cond) {
   addrmod1(cond | 0*B21 | s, src1, dst, src2);
@@ -886,7 +912,7 @@
   if (FLAG_push_pop_elimination &&
       last_bound_pos_ <= (pc_offset() - pattern_size) &&
       reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
-      // pattern
+      // Pattern.
       instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
       (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
     pc_ -= 2 * kInstrSize;
@@ -960,7 +986,7 @@
 }
 
 
-// Multiply instructions
+// Multiply instructions.
 void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
                     SBit s, Condition cond) {
   ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
@@ -1029,7 +1055,7 @@
 }
 
 
-// Miscellaneous arithmetic instructions
+// Miscellaneous arithmetic instructions.
 void Assembler::clz(Register dst, Register src, Condition cond) {
   // v5 and above.
   ASSERT(!dst.is(pc) && !src.is(pc));
@@ -1038,7 +1064,7 @@
 }
 
 
-// Status register access instructions
+// Status register access instructions.
 void Assembler::mrs(Register dst, SRegister s, Condition cond) {
   ASSERT(!dst.is(pc));
   emit(cond | B24 | s | 15*B16 | dst.code()*B12);
@@ -1050,12 +1076,12 @@
   ASSERT(fields >= B16 && fields < B20);  // at least one field set
   Instr instr;
   if (!src.rm_.is_valid()) {
-    // immediate
+    // Immediate.
     uint32_t rotate_imm;
     uint32_t immed_8;
     if (MustUseIp(src.rmode_) ||
         !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
-      // immediate operand cannot be encoded, load it first to register ip
+      // Immediate operand cannot be encoded, load it first to register ip.
       RecordRelocInfo(src.rmode_, src.imm32_);
       ldr(ip, MemOperand(pc, 0), cond);
       msr(fields, Operand(ip), cond);
@@ -1070,7 +1096,7 @@
 }
 
 
-// Load/Store instructions
+// Load/Store instructions.
 void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
   if (dst.is(pc)) {
     WriteRecordedPositions();
@@ -1085,7 +1111,7 @@
   if (FLAG_push_pop_elimination &&
       last_bound_pos_ <= (pc_offset() - pattern_size) &&
       reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
-      // pattern
+      // Pattern.
       instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
       instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
     pc_ -= 2 * kInstrSize;
@@ -1106,6 +1132,7 @@
   if (FLAG_push_pop_elimination &&
      last_bound_pos_ <= (pc_offset() - pattern_size) &&
      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+     // Pattern.
      instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
      instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
     pc_ -= 2 * kInstrSize;
@@ -1147,17 +1174,17 @@
 }
 
 
-// Load/Store multiple instructions
+// Load/Store multiple instructions.
 void Assembler::ldm(BlockAddrMode am,
                     Register base,
                     RegList dst,
                     Condition cond) {
-  // ABI stack constraint: ldmxx base, {..sp..}  base != sp  is not restartable
+  // ABI stack constraint: ldmxx base, {..sp..}  base != sp  is not restartable.
   ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
 
   addrmod4(cond | B27 | am | L, base, dst);
 
-  // emit the constant pool after a function return implemented by ldm ..{..pc}
+  // Emit the constant pool after a function return implemented by ldm ..{..pc}.
   if (cond == al && (dst & pc.bit()) != 0) {
     // There is a slight chance that the ldm instruction was actually a call,
     // in which case it would be wrong to return into the constant pool; we
@@ -1177,7 +1204,7 @@
 }
 
 
-// Semaphore instructions
+// Semaphore instructions.
 void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
   ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
   ASSERT(!dst.is(base) && !src.is(base));
@@ -1197,7 +1224,7 @@
 }
 
 
-// Exception-generating instructions and debugging support
+// Exception-generating instructions and debugging support.
 void Assembler::stop(const char* msg) {
 #if !defined(__arm__)
   // The simulator handles these special instructions and stops execution.
@@ -1222,7 +1249,7 @@
 }
 
 
-// Coprocessor instructions
+// Coprocessor instructions.
 void Assembler::cdp(Coprocessor coproc,
                     int opcode_1,
                     CRegister crd,
@@ -1307,7 +1334,7 @@
                     int option,
                     LFlag l,
                     Condition cond) {
-  // unindexed addressing
+  // Unindexed addressing.
   ASSERT(is_uint8(option));
   emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
        coproc*B8 | (option & 255));
@@ -1346,7 +1373,7 @@
                     int option,
                     LFlag l,
                     Condition cond) {
-  // unindexed addressing
+  // Unindexed addressing.
   ASSERT(is_uint8(option));
   emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
        coproc*B8 | (option & 255));
@@ -1464,7 +1491,7 @@
                      const Condition cond) {
   // Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd).
   // Instruction details available in ARM DDI 0406A, A8-576.
-  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) |opc2=000(18-16) |
+  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=000(18-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
@@ -1571,14 +1598,14 @@
 }
 
 
-// Pseudo instructions
+// Pseudo instructions.
 void Assembler::lea(Register dst,
                     const MemOperand& x,
                     SBit s,
                     Condition cond) {
   int am = x.am_;
   if (!x.rm_.is_valid()) {
-    // immediate offset
+    // Immediate offset.
     if ((am & P) == 0)  // post indexing
       mov(dst, Operand(x.rn_), s, cond);
     else if ((am & U) == 0)  // negative indexing
@@ -1612,7 +1639,7 @@
 }
 
 
-// Debugging
+// Debugging.
 void Assembler::RecordJSReturn() {
   WriteRecordedPositions();
   CheckBuffer();
@@ -1665,7 +1692,7 @@
 void Assembler::GrowBuffer() {
   if (!own_buffer_) FATAL("external code buffer is too small");
 
-  // compute new buffer size
+  // Compute new buffer size.
   CodeDesc desc;  // the new buffer
   if (buffer_size_ < 4*KB) {
     desc.buffer_size = 4*KB;
@@ -1676,20 +1703,20 @@
   }
   CHECK_GT(desc.buffer_size, 0);  // no overflow
 
-  // setup new buffer
+  // Setup new buffer.
   desc.buffer = NewArray<byte>(desc.buffer_size);
 
   desc.instr_size = pc_offset();
   desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
 
-  // copy the data
+  // Copy the data.
   int pc_delta = desc.buffer - buffer_;
   int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
   memmove(desc.buffer, buffer_, desc.instr_size);
   memmove(reloc_info_writer.pos() + rc_delta,
           reloc_info_writer.pos(), desc.reloc_size);
 
-  // switch buffers
+  // Switch buffers.
   DeleteArray(buffer_);
   buffer_ = desc.buffer;
   buffer_size_ = desc.buffer_size;
@@ -1697,11 +1724,11 @@
   reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                                reloc_info_writer.last_pc() + pc_delta);
 
-  // none of our relocation types are pc relative pointing outside the code
+  // None of our relocation types are pc relative pointing outside the code
   // buffer nor pc absolute pointing inside the code buffer, so there is no need
-  // to relocate any emitted relocation entries
+  // to relocate any emitted relocation entries.
 
-  // relocate pending relocation entries
+  // Relocate pending relocation entries.
   for (int i = 0; i < num_prinfo_; i++) {
     RelocInfo& rinfo = prinfo_[i];
     ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
@@ -1716,16 +1743,16 @@
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
   RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
   if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
-    // Adjust code for new modes
+    // Adjust code for new modes.
     ASSERT(RelocInfo::IsJSReturn(rmode)
            || RelocInfo::IsComment(rmode)
            || RelocInfo::IsPosition(rmode));
-    // these modes do not need an entry in the constant pool
+    // These modes do not need an entry in the constant pool.
   } else {
     ASSERT(num_prinfo_ < kMaxNumPRInfo);
     prinfo_[num_prinfo_++] = rinfo;
     // Make sure the constant pool is not emitted in place of the next
-    // instruction for which we just recorded relocation info
+    // instruction for which we just recorded relocation info.
     BlockConstPoolBefore(pc_offset() + kInstrSize);
   }
   if (rinfo.rmode() != RelocInfo::NONE) {
@@ -1752,7 +1779,7 @@
   // blocked for a specific range.
   next_buffer_check_ = pc_offset() + kCheckConstInterval;
 
-  // There is nothing to do if there are no pending relocation info entries
+  // There is nothing to do if there are no pending relocation info entries.
   if (num_prinfo_ == 0) return;
 
   // We emit a constant pool at regular intervals of about kDistBetweenPools
@@ -1778,10 +1805,11 @@
   // no_const_pool_before_, which is checked here. Also, recursive calls to
   // CheckConstPool are blocked by no_const_pool_before_.
   if (pc_offset() < no_const_pool_before_) {
-    // Emission is currently blocked; make sure we try again as soon as possible
+    // Emission is currently blocked; make sure we try again as soon as
+    // possible.
     next_buffer_check_ = no_const_pool_before_;
 
-    // Something is wrong if emission is forced and blocked at the same time
+    // Something is wrong if emission is forced and blocked at the same time.
     ASSERT(!force_emit);
     return;
   }
@@ -1795,23 +1823,23 @@
       jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
   while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
 
-  // Block recursive calls to CheckConstPool
+  // Block recursive calls to CheckConstPool.
   BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
                        num_prinfo_*kInstrSize);
   // Don't bother to check for the emit calls below.
   next_buffer_check_ = no_const_pool_before_;
 
-  // Emit jump over constant pool if necessary
+  // Emit jump over constant pool if necessary.
   Label after_pool;
   if (require_jump) b(&after_pool);
 
   RecordComment("[ Constant Pool");
 
-  // Put down constant pool marker
-  // "Undefined instruction" as specified by A3.1 Instruction set encoding
+  // Put down constant pool marker "Undefined instruction" as specified by
+  // A3.1 Instruction set encoding.
   emit(0x03000000 | num_prinfo_);
 
-  // Emit constant pool entries
+  // Emit constant pool entries.
   for (int i = 0; i < num_prinfo_; i++) {
     RelocInfo& rinfo = prinfo_[i];
     ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
@@ -1819,8 +1847,8 @@
            rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
     Instr instr = instr_at(rinfo.pc());
 
-    // Instruction to patch must be a ldr/str [pc, #offset]
-    // P and U set, B and W clear, Rn == pc, offset12 still 0
+    // Instruction to patch must be a ldr/str [pc, #offset].
+    // P and U set, B and W clear, Rn == pc, offset12 still 0.
     ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
            (2*B25 | P | U | pc.code()*B16));
     int delta = pc_ - rinfo.pc() - 8;
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 208d583..f6b7a06 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -80,7 +80,7 @@
     return 1 << code_;
   }
 
-  // (unfortunately we can't make this private in a struct)
+  // Unfortunately we can't make this private in a struct.
   int code_;
 };
 
@@ -205,7 +205,7 @@
     return 1 << code_;
   }
 
-  // (unfortunately we can't make this private in a struct)
+  // Unfortunately we can't make this private in a struct.
   int code_;
 };
 
@@ -250,7 +250,7 @@
 };
 
 
-// Condition field in instructions
+// Condition field in instructions.
 enum Condition {
   eq =  0 << 28,  // Z set            equal.
   ne =  1 << 28,  // Z clear          not equal.
@@ -628,6 +628,9 @@
   void blx(Label* L)  { blx(branch_offset(L, false)); }  // v5 and above
 
   // Data-processing instructions
+  void ubfx(Register dst, Register src1, const Operand& src2,
+            const Operand& src3, Condition cond = al);
+
   void and_(Register dst, Register src1, const Operand& src2,
             SBit s = LeaveCC, Condition cond = al);
 
diff --git a/src/arm/assembler-thumb2.h b/src/arm/assembler-thumb2.h
index 31e9487..9998e63 100644
--- a/src/arm/assembler-thumb2.h
+++ b/src/arm/assembler-thumb2.h
@@ -80,7 +80,7 @@
     return 1 << code_;
   }
 
-  // (unfortunately we can't make this private in a struct)
+  // Unfortunately we can't make this private in a struct.
   int code_;
 };
 
@@ -205,7 +205,7 @@
     return 1 << code_;
   }
 
-  // (unfortunately we can't make this private in a struct)
+  // Unfortunately we can't make this private in a struct.
   int code_;
 };
 
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 7bee98d..9afefac 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -121,14 +121,10 @@
 // -------------------------------------------------------------------------
 // CodeGenerator implementation
 
-CodeGenerator::CodeGenerator(MacroAssembler* masm,
-                             Handle<Script> script,
-                             bool is_eval)
-    : is_eval_(is_eval),
-      script_(script),
-      deferred_(8),
+CodeGenerator::CodeGenerator(MacroAssembler* masm)
+    : deferred_(8),
       masm_(masm),
-      scope_(NULL),
+      info_(NULL),
       frame_(NULL),
       allocator_(NULL),
       cc_reg_(al),
@@ -137,23 +133,21 @@
 }
 
 
+Scope* CodeGenerator::scope() { return info_->function()->scope(); }
+
+
 // Calling conventions:
 // fp: caller's frame pointer
 // sp: stack pointer
 // r1: called JS function
 // cp: callee's context
 
-void CodeGenerator::Generate(FunctionLiteral* fun,
-                             Mode mode,
-                             CompilationInfo* info) {
+void CodeGenerator::Generate(CompilationInfo* info, Mode mode) {
   // Record the position for debugging purposes.
-  CodeForFunctionPosition(fun);
-
-  ZoneList<Statement*>* body = fun->body();
+  CodeForFunctionPosition(info->function());
 
   // Initialize state.
-  ASSERT(scope_ == NULL);
-  scope_ = fun->scope();
+  info_ = info;
   ASSERT(allocator_ == NULL);
   RegisterAllocator register_allocator(this);
   allocator_ = &register_allocator;
@@ -174,7 +168,7 @@
 
 #ifdef DEBUG
     if (strlen(FLAG_stop_at) > 0 &&
-        fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+        info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
       frame_->SpillAll();
       __ stop("stop-at");
     }
@@ -189,7 +183,7 @@
       frame_->AllocateStackSlots();
 
       VirtualFrame::SpilledScope spilled_scope;
-      int heap_slots = scope_->num_heap_slots();
+      int heap_slots = scope()->num_heap_slots();
       if (heap_slots > 0) {
         // Allocate local context.
         // Get outer context and create a new context based on it.
@@ -219,7 +213,6 @@
       // 3) don't copy parameter operand code from SlotOperand!
       {
         Comment cmnt2(masm_, "[ copy context parameters into .context");
-
         // Note that iteration order is relevant here! If we have the same
         // parameter twice (e.g., function (x, y, x)), and that parameter
         // needs to be copied into the context, it must be the last argument
@@ -228,12 +221,11 @@
         // order: such a parameter is copied repeatedly into the same
         // context location and thus the last value is what is seen inside
         // the function.
-        for (int i = 0; i < scope_->num_parameters(); i++) {
-          Variable* par = scope_->parameter(i);
+        for (int i = 0; i < scope()->num_parameters(); i++) {
+          Variable* par = scope()->parameter(i);
           Slot* slot = par->slot();
           if (slot != NULL && slot->type() == Slot::CONTEXT) {
-            // No parameters in global scope.
-            ASSERT(!scope_->is_global_scope());
+            ASSERT(!scope()->is_global_scope());  // No params in global scope.
             __ ldr(r1, frame_->ParameterAt(i));
             // Loads r2 with context; used below in RecordWrite.
             __ str(r1, SlotOperand(slot, r2));
@@ -249,20 +241,20 @@
       // Store the arguments object.  This must happen after context
       // initialization because the arguments object may be stored in the
       // context.
-      if (scope_->arguments() != NULL) {
+      if (scope()->arguments() != NULL) {
         Comment cmnt(masm_, "[ allocate arguments object");
-        ASSERT(scope_->arguments_shadow() != NULL);
-        Variable* arguments = scope_->arguments()->var();
-        Variable* shadow = scope_->arguments_shadow()->var();
+        ASSERT(scope()->arguments_shadow() != NULL);
+        Variable* arguments = scope()->arguments()->var();
+        Variable* shadow = scope()->arguments_shadow()->var();
         ASSERT(arguments != NULL && arguments->slot() != NULL);
         ASSERT(shadow != NULL && shadow->slot() != NULL);
         ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
         __ ldr(r2, frame_->Function());
         // The receiver is below the arguments, the return address, and the
         // frame pointer on the stack.
-        const int kReceiverDisplacement = 2 + scope_->num_parameters();
+        const int kReceiverDisplacement = 2 + scope()->num_parameters();
         __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
-        __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
+        __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
         frame_->Adjust(3);
         __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
         frame_->CallStub(&stub, 3);
@@ -273,10 +265,10 @@
       }
 
       // Initialize ThisFunction reference if present.
-      if (scope_->is_function_scope() && scope_->function() != NULL) {
+      if (scope()->is_function_scope() && scope()->function() != NULL) {
         __ mov(ip, Operand(Factory::the_hole_value()));
         frame_->EmitPush(ip);
-        StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+        StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
       }
     } else {
       // When used as the secondary compiler for splitting, r1, cp,
@@ -295,12 +287,12 @@
     // Generate code to 'execute' declarations and initialize functions
     // (source elements). In case of an illegal redeclaration we need to
     // handle that instead of processing the declarations.
-    if (scope_->HasIllegalRedeclaration()) {
+    if (scope()->HasIllegalRedeclaration()) {
       Comment cmnt(masm_, "[ illegal redeclarations");
-      scope_->VisitIllegalRedeclaration(this);
+      scope()->VisitIllegalRedeclaration(this);
     } else {
       Comment cmnt(masm_, "[ declarations");
-      ProcessDeclarations(scope_->declarations());
+      ProcessDeclarations(scope()->declarations());
       // Bail out if a stack-overflow exception occurred when processing
       // declarations.
       if (HasStackOverflow()) return;
@@ -314,7 +306,7 @@
     // Compile the body of the function in a vanilla state. Don't
     // bother compiling all the code if the scope has an illegal
     // redeclaration.
-    if (!scope_->HasIllegalRedeclaration()) {
+    if (!scope()->HasIllegalRedeclaration()) {
       Comment cmnt(masm_, "[ function body");
 #ifdef DEBUG
       bool is_builtin = Bootstrapper::IsActive();
@@ -325,14 +317,14 @@
         // Ignore the return value.
       }
 #endif
-      VisitStatementsAndSpill(body);
+      VisitStatementsAndSpill(info->function()->body());
     }
   }
 
   // Generate the return sequence if necessary.
   if (has_valid_frame() || function_return_.is_linked()) {
     if (!function_return_.is_linked()) {
-      CodeForReturnPosition(fun);
+      CodeForReturnPosition(info->function());
     }
     // exit
     // r0: result
@@ -355,7 +347,7 @@
 
     // Calculate the exact length of the return sequence and make sure that
     // the constant pool is not emitted inside of the return sequence.
-    int32_t sp_delta = (scope_->num_parameters() + 1) * kPointerSize;
+    int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
     int return_sequence_length = Assembler::kJSReturnSequenceLength;
     if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
       // Additional mov instruction generated.
@@ -395,7 +387,6 @@
   }
 
   allocator_ = NULL;
-  scope_ = NULL;
 }
 
 
@@ -2341,7 +2332,7 @@
 
   // Build the function boilerplate and instantiate it.
   Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(node, script_, this);
+      Compiler::BuildBoilerplate(node, script(), this);
   // Check for stack-overflow exception.
   if (HasStackOverflow()) {
     ASSERT(frame_->height() == original_height);
@@ -3519,7 +3510,7 @@
 
   // Seed the result with the formal parameters count, which will be used
   // in case no arguments adaptor frame is found below the current frame.
-  __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
+  __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
 
   // Call the shared stub to get to the arguments.length.
   ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
@@ -3536,7 +3527,7 @@
   // Load the key into r1 and the formal parameters count into r0.
   LoadAndSpill(args->at(0));
   frame_->EmitPop(r1);
-  __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
+  __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
 
   // Call the shared stub to get to arguments[key].
   ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
@@ -3560,7 +3551,8 @@
   Load(args->at(0));
   Load(args->at(1));
 
-  frame_->CallRuntime(Runtime::kStringAdd, 2);
+  StringAddStub stub(NO_STRING_ADD_FLAGS);
+  frame_->CallStub(&stub, 2);
   frame_->EmitPush(r0);
 }
 
@@ -3572,7 +3564,8 @@
   Load(args->at(1));
   Load(args->at(2));
 
-  frame_->CallRuntime(Runtime::kSubString, 3);
+  SubStringStub stub;
+  frame_->CallStub(&stub, 3);
   frame_->EmitPush(r0);
 }
 
@@ -5340,7 +5333,7 @@
     // r1 : first argument
     // r0 : second argument
     // sp[0] : second argument
-    // sp[1] : first argument
+    // sp[4] : first argument
 
     Label not_strings, not_string1, string1;
     __ tst(r1, Operand(kSmiTagMask));
@@ -5355,7 +5348,8 @@
     __ b(ge, &string1);
 
     // First and second argument are strings.
-    __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+    StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+    __ TailCallStub(&stub);
 
     // Only first argument is a string.
     __ bind(&string1);
@@ -5369,7 +5363,6 @@
     __ b(ge, &not_strings);
 
     // Only second argument is a string.
-    __ b(&not_strings);
     __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
 
     __ bind(&not_strings);
@@ -5851,6 +5844,7 @@
 }
 
 
+
 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   // r1 : x
   // r0 : y
@@ -6043,9 +6037,7 @@
         case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break;
         case Token::SAR:
           // Remove tags from right operand.
-          __ mov(r2, Operand(r0, ASR, kSmiTagSize));  // y
-          // Use only the 5 least significant bits of the shift count.
-          __ and_(r2, r2, Operand(0x1f));
+          __ GetLeastBitsFromSmi(r2, r0, 5);
           __ mov(r0, Operand(r1, ASR, r2));
           // Smi tag result.
           __ bic(r0, r0, Operand(kSmiTagMask));
@@ -6054,9 +6046,7 @@
           // Remove tags from operands.  We can't do this on a 31 bit number
           // because then the 0s get shifted into bit 30 instead of bit 31.
           __ mov(r3, Operand(r1, ASR, kSmiTagSize));  // x
-          __ mov(r2, Operand(r0, ASR, kSmiTagSize));  // y
-          // Use only the 5 least significant bits of the shift count.
-          __ and_(r2, r2, Operand(0x1f));
+          __ GetLeastBitsFromSmi(r2, r0, 5);
           __ mov(r3, Operand(r3, LSR, r2));
           // Unsigned shift is not allowed to produce a negative number, so
           // check the sign bit and the sign bit after Smi tagging.
@@ -6068,9 +6058,7 @@
         case Token::SHL:
           // Remove tags from operands.
           __ mov(r3, Operand(r1, ASR, kSmiTagSize));  // x
-          __ mov(r2, Operand(r0, ASR, kSmiTagSize));  // y
-          // Use only the 5 least significant bits of the shift count.
-          __ and_(r2, r2, Operand(0x1f));
+          __ GetLeastBitsFromSmi(r2, r0, 5);
           __ mov(r3, Operand(r3, LSL, r2));
           // Check that the signed result fits in a Smi.
           __ add(r2, r3, Operand(0x40000000), SetCC);
@@ -6836,6 +6824,340 @@
 }
 
 
+void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
+                                            Register dest,
+                                            Register src,
+                                            Register count,
+                                            Register scratch,
+                                            bool ascii) {
+  Label loop;
+  Label done;
+  // This loop just copies one character at a time, as it is only used for very
+  // short strings.
+  if (!ascii) {
+    __ add(count, count, Operand(count), SetCC);
+  } else {
+    __ cmp(count, Operand(0));
+  }
+  __ b(eq, &done);
+
+  __ bind(&loop);
+  __ ldrb(scratch, MemOperand(src, 1, PostIndex));
+  // Perform sub between load and dependent store to get the load time to
+  // complete.
+  __ sub(count, count, Operand(1), SetCC);
+  __ strb(scratch, MemOperand(dest, 1, PostIndex));
+  // last iteration.
+  __ b(gt, &loop);
+
+  __ bind(&done);
+}
+
+
+enum CopyCharactersFlags {
+  COPY_ASCII = 1,
+  DEST_ALWAYS_ALIGNED = 2
+};
+
+
+void StringStubBase::GenerateCopyCharactersLong(MacroAssembler* masm,
+                                                Register dest,
+                                                Register src,
+                                                Register count,
+                                                Register scratch1,
+                                                Register scratch2,
+                                                Register scratch3,
+                                                Register scratch4,
+                                                Register scratch5,
+                                                int flags) {
+  bool ascii = (flags & COPY_ASCII) != 0;
+  bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
+
+  if (dest_always_aligned && FLAG_debug_code) {
+    // Check that destination is actually word aligned if the flag says
+    // that it is.
+    __ tst(dest, Operand(kPointerAlignmentMask));
+    __ Check(eq, "Destination of copy not aligned.");
+  }
+
+  const int kReadAlignment = 4;
+  const int kReadAlignmentMask = kReadAlignment - 1;
+  // Ensure that reading an entire aligned word containing the last character
+  // of a string will not read outside the allocated area (because we pad up
+  // to kObjectAlignment).
+  ASSERT(kObjectAlignment >= kReadAlignment);
+  // Assumes word reads and writes are little endian.
+  // Nothing to do for zero characters.
+  Label done;
+  if (!ascii) {
+    __ add(count, count, Operand(count), SetCC);
+  } else {
+    __ cmp(count, Operand(0));
+  }
+  __ b(eq, &done);
+
+  // Assume that you cannot read (or write) unaligned.
+  Label byte_loop;
+  // Must copy at least eight bytes, otherwise just do it one byte at a time.
+  __ cmp(count, Operand(8));
+  __ add(count, dest, Operand(count));
+  Register limit = count;  // Read until src equals this.
+  __ b(lt, &byte_loop);
+
+  if (!dest_always_aligned) {
+    // Align dest by byte copying. Copies between zero and three bytes.
+    __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
+    Label dest_aligned;
+    __ b(eq, &dest_aligned);
+    __ cmp(scratch4, Operand(2));
+    __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
+    __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
+    __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
+    __ strb(scratch1, MemOperand(dest, 1, PostIndex));
+    __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
+    __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
+    __ bind(&dest_aligned);
+  }
+
+  Label simple_loop;
+
+  __ sub(scratch4, dest, Operand(src));
+  __ and_(scratch4, scratch4, Operand(0x03), SetCC);
+  __ b(eq, &simple_loop);
+  // Shift register is number of bits in a source word that
+  // must be combined with bits in the next source word in order
+  // to create a destination word.
+
+  // Complex loop for src/dst that are not aligned the same way.
+  {
+    Label loop;
+    __ mov(scratch4, Operand(scratch4, LSL, 3));
+    Register left_shift = scratch4;
+    __ and_(src, src, Operand(~3));  // Round down to load previous word.
+    __ ldr(scratch1, MemOperand(src, 4, PostIndex));
+    // Store the "shift" most significant bits of scratch in the least
+    // significant bits (i.e., shift down by (32-shift)).
+    __ rsb(scratch2, left_shift, Operand(32));
+    Register right_shift = scratch2;
+    __ mov(scratch1, Operand(scratch1, LSR, right_shift));
+
+    __ bind(&loop);
+    __ ldr(scratch3, MemOperand(src, 4, PostIndex));
+    __ sub(scratch5, limit, Operand(dest));
+    __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
+    __ str(scratch1, MemOperand(dest, 4, PostIndex));
+    __ mov(scratch1, Operand(scratch3, LSR, right_shift));
+    // Loop if four or more bytes left to copy.
+    // Compare to eight, because we did the subtract before increasing dst.
+    __ sub(scratch5, scratch5, Operand(8), SetCC);
+    __ b(ge, &loop);
+  }
+  // There is now between zero and three bytes left to copy (negative that
+  // number is in scratch5), and between one and three bytes already read into
+  // scratch1 (eight times that number in scratch4). We may have read past
+  // the end of the string, but because objects are aligned, we have not read
+  // past the end of the object.
+  // Find the minimum of remaining characters to move and preloaded characters
+  // and write those as bytes.
+  __ add(scratch5, scratch5, Operand(4), SetCC);
+  __ b(eq, &done);
+  __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
+  // Move minimum of bytes read and bytes left to copy to scratch4.
+  __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
+  // Between one and three (value in scratch5) characters already read into
+  // scratch ready to write.
+  __ cmp(scratch5, Operand(2));
+  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
+  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
+  __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
+  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
+  __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
+  // Copy any remaining bytes.
+  __ b(&byte_loop);
+
+  // Simple loop.
+  // Copy words from src to dst, until less than four bytes left.
+  // Both src and dest are word aligned.
+  __ bind(&simple_loop);
+  {
+    Label loop;
+    __ bind(&loop);
+    __ ldr(scratch1, MemOperand(src, 4, PostIndex));
+    __ sub(scratch3, limit, Operand(dest));
+    __ str(scratch1, MemOperand(dest, 4, PostIndex));
+    // Compare to 8, not 4, because we do the subtraction before increasing
+    // dest.
+    __ cmp(scratch3, Operand(8));
+    __ b(ge, &loop);
+  }
+
+  // Copy bytes from src to dst until dst hits limit.
+  __ bind(&byte_loop);
+  __ cmp(dest, Operand(limit));
+  __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
+  __ b(ge, &done);
+  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
+  __ b(&byte_loop);
+
+  __ bind(&done);
+}
+
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+  Label runtime;
+
+  // Stack frame on entry.
+  //  lr: return address
+  //  sp[0]: to
+  //  sp[4]: from
+  //  sp[8]: string
+
+  // This stub is called from the native-call %_SubString(...), so
+  // nothing can be assumed about the arguments. It is tested that:
+  //  "string" is a sequential string,
+  //  both "from" and "to" are smis, and
+  //  0 <= from <= to <= string.length.
+  // If any of these assumptions fail, we call the runtime system.
+
+  static const int kToOffset = 0 * kPointerSize;
+  static const int kFromOffset = 1 * kPointerSize;
+  static const int kStringOffset = 2 * kPointerSize;
+
+
+  // Check bounds and smi-ness.
+  __ ldr(r7, MemOperand(sp, kToOffset));
+  __ ldr(r6, MemOperand(sp, kFromOffset));
+  ASSERT_EQ(0, kSmiTag);
+  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  // I.e., arithmetic shift right by one un-smi-tags.
+  __ mov(r2, Operand(r7, ASR, 1), SetCC);
+  __ mov(r3, Operand(r6, ASR, 1), SetCC, cc);
+  // If either r7 or r6 had the smi tag bit set, then carry is set now.
+  __ b(cs, &runtime);  // Either "from" or "to" is not a smi.
+  __ b(mi, &runtime);  // From is negative.
+
+  __ sub(r2, r2, Operand(r3), SetCC);
+  __ b(mi, &runtime);  // Fail if from > to.
+  // Handle sub-strings of length 2 and less in the runtime system.
+  __ cmp(r2, Operand(2));
+  __ b(le, &runtime);
+
+  // r2: length
+  // r6: from (smi)
+  // r7: to (smi)
+
+  // Make sure first argument is a sequential (or flat) string.
+  __ ldr(r5, MemOperand(sp, kStringOffset));
+  ASSERT_EQ(0, kSmiTag);
+  __ tst(r5, Operand(kSmiTagMask));
+  __ b(eq, &runtime);
+  Condition is_string = masm->IsObjectStringType(r5, r1);
+  __ b(NegateCondition(is_string), &runtime);
+
+  // r1: instance type
+  // r2: length
+  // r5: string
+  // r6: from (smi)
+  // r7: to (smi)
+  Label seq_string;
+  __ and_(r4, r1, Operand(kStringRepresentationMask));
+  ASSERT(kSeqStringTag < kConsStringTag);
+  ASSERT(kExternalStringTag > kConsStringTag);
+  __ cmp(r4, Operand(kConsStringTag));
+  __ b(gt, &runtime);  // External strings go to runtime.
+  __ b(lt, &seq_string);  // Sequential strings are handled directly.
+
+  // Cons string. Try to recurse (once) on the first substring.
+  // (This adds a little more generality than necessary to handle flattened
+  // cons strings, but not much).
+  __ ldr(r5, FieldMemOperand(r5, ConsString::kFirstOffset));
+  __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
+  __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+  __ tst(r1, Operand(kStringRepresentationMask));
+  ASSERT_EQ(0, kSeqStringTag);
+  __ b(ne, &runtime);  // Cons and External strings go to runtime.
+
+  // Definitely a sequential string.
+  __ bind(&seq_string);
+
+  // r1: instance type.
+  // r2: length
+  // r5: string
+  // r6: from (smi)
+  // r7: to (smi)
+  __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
+  __ cmp(r4, Operand(r7, ASR, 1));
+  __ b(lt, &runtime);  // Fail if to > length.
+
+  // r1: instance type.
+  // r2: result string length.
+  // r5: string.
+  // r6: from offset (smi)
+  // Check for flat ascii string.
+  Label non_ascii_flat;
+  __ tst(r1, Operand(kStringEncodingMask));
+  ASSERT_EQ(0, kTwoByteStringTag);
+  __ b(eq, &non_ascii_flat);
+
+  // Allocate the result.
+  __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
+
+  // r0: result string.
+  // r2: result string length.
+  // r5: string.
+  // r6: from offset (smi)
+  // Locate first character of result.
+  __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // Locate 'from' character of string.
+  __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(r5, r5, Operand(r6, ASR, 1));
+
+  // r0: result string.
+  // r1: first character of result string.
+  // r2: result string length.
+  // r5: first character of sub string to copy.
+  ASSERT_EQ(0, SeqAsciiString::kHeaderSize & kObjectAlignmentMask);
+  GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
+                             COPY_ASCII | DEST_ALWAYS_ALIGNED);
+  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+  __ add(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  __ bind(&non_ascii_flat);
+  // r2: result string length.
+  // r5: string.
+  // r6: from offset (smi)
+  // Check for flat two byte string.
+
+  // Allocate the result.
+  __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime);
+
+  // r0: result string.
+  // r2: result string length.
+  // r5: string.
+  // Locate first character of result.
+  __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // Locate 'from' character of string.
+  __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // As "from" is a smi it is already multiplied by two, which matches the
+  // size of a two-byte character.
+  __ add(r5, r5, Operand(r6));
+
+  // r0: result string.
+  // r1: first character of result.
+  // r2: result length.
+  // r5: first character of string to copy.
+  ASSERT_EQ(0, SeqTwoByteString::kHeaderSize & kObjectAlignmentMask);
+  GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
+                             DEST_ALWAYS_ALIGNED);
+  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+  __ add(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  // Just jump to runtime to create the sub string.
+  __ bind(&runtime);
+  __ TailCallRuntime(ExternalReference(Runtime::kSubString), 3, 1);
+}
 
 
 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
@@ -6897,12 +7219,10 @@
   Label runtime;
 
   // Stack frame on entry.
-  //  sp[0]: return address
-  //  sp[4]: right string
-  //  sp[8]: left string
-
-  __ ldr(r0, MemOperand(sp, 2 * kPointerSize));  // left
-  __ ldr(r1, MemOperand(sp, 1 * kPointerSize));  // right
+  //  sp[0]: right string
+  //  sp[4]: left string
+  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // left
+  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // right
 
   Label not_same;
   __ cmp(r0, r1);
@@ -6931,6 +7251,220 @@
 }
 
 
+void StringAddStub::Generate(MacroAssembler* masm) {
+  Label string_add_runtime;
+  // Stack on entry:
+  // sp[0]: second argument.
+  // sp[4]: first argument.
+
+  // Load the two arguments.
+  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // First argument.
+  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // Second argument.
+
+  // Make sure that both arguments are strings if not known in advance.
+  if (string_check_) {
+    ASSERT_EQ(0, kSmiTag);
+    __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
+    // Load instance types.
+    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
+    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+    ASSERT_EQ(0, kStringTag);
+    // If either is not a string, go to runtime.
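+    // The second tst is conditional on eq and only executes when the first
+    // operand passed the string check, so ne afterwards means at least one
+    // of the two arguments is not a string.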
+    __ tst(r4, Operand(kIsNotStringMask));
+    __ tst(r5, Operand(kIsNotStringMask), eq);
+    __ b(ne, &string_add_runtime);
+  }
+
+  // Both arguments are strings.
+  // r0: first string
+  // r1: second string
+  // r4: first string instance type (if string_check_)
+  // r5: second string instance type (if string_check_)
+  {
+    Label strings_not_empty;
+    // Check if either of the strings is empty. In that case return the other.
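+    // The checks are chained with conditional instructions: the mov places
+    // the second string in r0 only when the first one is empty, the second
+    // cmp only executes when the first one is non-empty, and the branch is
+    // taken only when both strings are non-empty.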
+    __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
+    __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
+    __ cmp(r2, Operand(0));  // Test if first string is empty.
+    __ mov(r0, Operand(r1), LeaveCC, eq);  // If first is empty, return second.
+    __ cmp(r3, Operand(0), ne);  // Else test if second string is empty.
+    __ b(ne, &strings_not_empty);  // If either string was empty, return r0.
+
+    __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+    __ add(sp, sp, Operand(2 * kPointerSize));
+    __ Ret();
+
+    __ bind(&strings_not_empty);
+  }
+
+  // Both strings are non-empty.
+  // r0: first string
+  // r1: second string
+  // r2: length of first string
+  // r3: length of second string
+  // r4: first string instance type (if string_check_)
+  // r5: second string instance type (if string_check_)
+  // Look at the length of the result of adding the two strings.
+  Label string_add_flat_result;
+  // Adding two lengths can't overflow.
+  ASSERT(String::kMaxLength * 2 > String::kMaxLength);
+  __ add(r6, r2, Operand(r3));
+  // Use the runtime system when adding two one character strings, as it
+  // contains optimizations for this specific case using the symbol table.
+  __ cmp(r6, Operand(2));
+  __ b(eq, &string_add_runtime);
+  // Check if resulting string will be flat.
+  __ cmp(r6, Operand(String::kMinNonFlatLength));
+  __ b(lt, &string_add_flat_result);
+  // Handle exceptionally long strings in the runtime system.
+  ASSERT((String::kMaxLength & 0x80000000) == 0);
+  ASSERT(IsPowerOf2(String::kMaxLength + 1));
+  // kMaxLength + 1 is representable as a shifted literal, kMaxLength is not.
+  __ cmp(r6, Operand(String::kMaxLength + 1));
+  __ b(hs, &string_add_runtime);
+
+  // If result is not supposed to be flat, allocate a cons string object.
+  // If both strings are ascii the result is an ascii cons string.
+  if (!string_check_) {
+    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
+    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+  }
+  Label non_ascii, allocated;
+  ASSERT_EQ(0, kTwoByteStringTag);
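+  // A set encoding bit means ASCII (kTwoByteStringTag is zero). The second
+  // tst only executes when the first string is ASCII, so eq after the pair
+  // means at least one of the strings is two-byte.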
+  __ tst(r4, Operand(kStringEncodingMask));
+  __ tst(r5, Operand(kStringEncodingMask), ne);
+  __ b(eq, &non_ascii);
+
+  // Allocate an ASCII cons string.
+  __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
+  __ bind(&allocated);
+  // Fill the fields of the cons string.
+  __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
+  __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
+  __ mov(r0, Operand(r7));
+  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+  __ add(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&non_ascii);
+  // Allocate a two byte cons string.
+  __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
+  __ jmp(&allocated);
+
+  // Handle creating a flat result. First check that both strings are
+  // sequential and that they have the same encoding.
+  // r0: first string
+  // r1: second string
+  // r2: length of first string
+  // r3: length of second string
+  // r4: first string instance type (if string_check_)
+  // r5: second string instance type (if string_check_)
+  // r6: sum of lengths.
+  __ bind(&string_add_flat_result);
+  if (!string_check_) {
+    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
+    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+  }
+  // Check that both strings are sequential.
+  ASSERT_EQ(0, kSeqStringTag);
+  __ tst(r4, Operand(kStringRepresentationMask));
+  __ tst(r5, Operand(kStringRepresentationMask), eq);
+  __ b(ne, &string_add_runtime);
+  // Now check if both strings have the same encoding (ASCII/Two-byte).
+  // r0: first string.
+  // r1: second string.
+  // r2: length of first string.
+  // r3: length of second string.
+  // r6: sum of lengths.
+  Label non_ascii_string_add_flat_result;
+  ASSERT(IsPowerOf2(kStringEncodingMask));  // Just one bit to test.
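+  // After the eor, the encoding bit of r7 is zero exactly when both strings
+  // use the same encoding, so a non-zero tst result means mixed encodings.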
+  __ eor(r7, r4, Operand(r5));
+  __ tst(r7, Operand(kStringEncodingMask));
+  __ b(ne, &string_add_runtime);
+  // And see if it's ASCII or two-byte.
+  __ tst(r4, Operand(kStringEncodingMask));
+  __ b(eq, &non_ascii_string_add_flat_result);
+
+  // Both strings are sequential ASCII strings. We also know that they are
+  // short (since the sum of the lengths is less than kMinNonFlatLength).
+  __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime);
+  // Locate first character of result.
+  __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // Locate first character of first argument.
+  __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // r0: first character of first string.
+  // r1: second string.
+  // r2: length of first string.
+  // r3: length of second string.
+  // r6: first character of result.
+  // r7: result string.
+  GenerateCopyCharacters(masm, r6, r0, r2, r4, true);
+
+  // Load second argument and locate first character.
+  __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // r1: first character of second string.
+  // r3: length of second string.
+  // r6: next character of result.
+  // r7: result string.
+  GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
+  __ mov(r0, Operand(r7));
+  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+  __ add(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&non_ascii_string_add_flat_result);
+  // Both strings are sequential two byte strings.
+  // r0: first string.
+  // r1: second string.
+  // r2: length of first string.
+  // r3: length of second string.
+  // r6: sum of length of strings.
+  __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime);
+  // r0: first string.
+  // r1: second string.
+  // r2: length of first string.
+  // r3: length of second string.
+  // r7: result string.
+
+  // Locate first character of result.
+  __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // Locate first character of first argument.
+  __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+  // r0: first character of first string.
+  // r1: second string.
+  // r2: length of first string.
+  // r3: length of second string.
+  // r6: first character of result.
+  // r7: result string.
+  GenerateCopyCharacters(masm, r6, r0, r2, r4, false);
+
+  // Locate first character of second argument.
+  __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+  // r1: first character of second string.
+  // r3: length of second string.
+  // r6: next character of result (after copy of first string).
+  // r7: result string.
+  GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
+
+  __ mov(r0, Operand(r7));
+  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+  __ add(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  // Just jump to runtime to add the two strings.
+  __ bind(&string_add_runtime);
+  __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 0384485..2578a39 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -161,19 +161,15 @@
 
   // Takes a function literal, generates code for it. This function should only
   // be called by compiler.cc.
-  static Handle<Code> MakeCode(FunctionLiteral* fun,
-                               Handle<Script> script,
-                               bool is_eval,
-                               CompilationInfo* info);
+  static Handle<Code> MakeCode(CompilationInfo* info);
 
   // Printing of AST, etc. as requested by flags.
-  static void MakeCodePrologue(FunctionLiteral* fun);
+  static void MakeCodePrologue(CompilationInfo* info);
 
   // Allocate and install the code.
-  static Handle<Code> MakeCodeEpilogue(FunctionLiteral* fun,
-                                       MacroAssembler* masm,
+  static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
                                        Code::Flags flags,
-                                       Handle<Script> script);
+                                       CompilationInfo* info);
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
@@ -189,7 +185,7 @@
   // Accessors
   MacroAssembler* masm() { return masm_; }
   VirtualFrame* frame() const { return frame_; }
-  Handle<Script> script() { return script_; }
+  inline Handle<Script> script();
 
   bool has_valid_frame() const { return frame_ != NULL; }
 
@@ -212,16 +208,15 @@
 
  private:
   // Construction/Destruction
-  CodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval);
+  explicit CodeGenerator(MacroAssembler* masm);
 
   // Accessors
-  Scope* scope() const { return scope_; }
+  inline bool is_eval();
+  Scope* scope();
 
   // Generating deferred code.
   void ProcessDeferred();
 
-  bool is_eval() { return is_eval_; }
-
   // State
   bool has_cc() const  { return cc_reg_ != al; }
   JumpTarget* true_target() const  { return state_->true_target(); }
@@ -249,7 +244,7 @@
   inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
 
   // Main code generation function
-  void Generate(FunctionLiteral* fun, Mode mode, CompilationInfo* info);
+  void Generate(CompilationInfo* info, Mode mode);
 
   // The following are used by class Reference.
   void LoadReference(Reference* ref);
@@ -425,16 +420,14 @@
   bool HasValidEntryRegisters();
 #endif
 
-  bool is_eval_;  // Tells whether code is generated for eval.
-
-  Handle<Script> script_;
   List<DeferredCode*> deferred_;
 
   // Assembler
   MacroAssembler* masm_;  // to generate code
 
+  CompilationInfo* info_;
+
   // Code generation state
-  Scope* scope_;
   VirtualFrame* frame_;
   RegisterAllocator* allocator_;
   Condition cc_reg_;
@@ -538,6 +531,74 @@
 };
 
 
+class StringStubBase: public CodeStub {
+ public:
+  // Generate code for copying characters using a simple loop. This should only
+  // be used in places where the number of characters is small and the
+  // additional setup and checking in GenerateCopyCharactersLong adds too much
+  // overhead. Copying of overlapping regions is not supported.
+  // Dest register ends at the position after the last character written.
+  void GenerateCopyCharacters(MacroAssembler* masm,
+                              Register dest,
+                              Register src,
+                              Register count,
+                              Register scratch,
+                              bool ascii);
+
+  // Generate code for copying a large number of characters. This function
+  // is allowed to spend extra time setting up conditions to make copying
+  // faster. Copying of overlapping regions is not supported.
+  // Dest register ends at the position after the last character written.
+  void GenerateCopyCharactersLong(MacroAssembler* masm,
+                                  Register dest,
+                                  Register src,
+                                  Register count,
+                                  Register scratch1,
+                                  Register scratch2,
+                                  Register scratch3,
+                                  Register scratch4,
+                                  Register scratch5,
+                                  int flags);
+};
+
+
+// Flag that indicates how to generate code for the stub StringAddStub.
+enum StringAddFlags {
+  NO_STRING_ADD_FLAGS = 0,
+  NO_STRING_CHECK_IN_STUB = 1 << 0  // Omit string check in stub.
+};
+
+
+class StringAddStub: public StringStubBase {
+ public:
+  explicit StringAddStub(StringAddFlags flags) {
+    string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
+  }
+
+ private:
+  Major MajorKey() { return StringAdd; }
+  int MinorKey() { return string_check_ ? 0 : 1; }
+
+  void Generate(MacroAssembler* masm);
+
+  // Should the stub check whether arguments are strings?
+  bool string_check_;
+};
+
+
+class SubStringStub: public StringStubBase {
+ public:
+  SubStringStub() {}
+
+ private:
+  Major MajorKey() { return SubString; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+};
+
+
 class StringCompareStub: public CodeStub {
  public:
   StringCompareStub() { }
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 5b31455..127c160 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -429,12 +429,22 @@
       return 3;
     }
     case 'o': {
-      if (format[3] == '1') {
+      if ((format[3] == '1') && (format[4] == '2')) {
         // 'off12: 12-bit offset for load and store instructions
         ASSERT(STRING_STARTS_WITH(format, "off12"));
         out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                              "%d", instr->Offset12Field());
         return 5;
+      } else if ((format[3] == '1') && (format[4] == '6')) {
+        ASSERT(STRING_STARTS_WITH(format, "off16to20"));
+        out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                             "%d", instr->Bits(20, 16) + 1);
+        return 9;
+      } else if (format[3] == '7') {
+        ASSERT(STRING_STARTS_WITH(format, "off7to11"));
+        out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                            "%d", instr->ShiftAmountField());
+        return 8;
       }
       // 'off8: 8-bit offset for extra load and store instructions
       ASSERT(STRING_STARTS_WITH(format, "off8"));
@@ -795,7 +805,18 @@
       break;
     }
     case 3: {
-      Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
+      if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
+        uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
+        uint32_t lsbit = static_cast<uint32_t>(instr->ShiftAmountField());
+        uint32_t msbit = widthminus1 + lsbit;
+        if (msbit <= 31) {
+          Format(instr, "ubfx'cond 'rd, 'rm, #'off7to11, #'off16to20");
+        } else {
+          UNREACHABLE();
+        }
+      } else {
+        Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
+      }
       break;
     }
     default: {
diff --git a/src/arm/fast-codegen-arm.cc b/src/arm/fast-codegen-arm.cc
index 1aeea7a..80da533 100644
--- a/src/arm/fast-codegen-arm.cc
+++ b/src/arm/fast-codegen-arm.cc
@@ -37,7 +37,7 @@
 
 void FastCodeGenerator::EmitLoadReceiver(Register reg) {
   // Offset 2 is due to return address and saved frame pointer.
-  int index = 2 + function()->scope()->num_parameters();
+  int index = 2 + scope()->num_parameters();
   __ ldr(reg, MemOperand(sp, index * kPointerSize));
 }
 
@@ -48,42 +48,48 @@
     PrintF("MapCheck(this)\n");
   }
 
-  EmitLoadReceiver(r1);
-  __ BranchOnSmi(r1, bailout());
-
-  ASSERT(has_receiver() && receiver()->IsHeapObject());
-  Handle<HeapObject> object = Handle<HeapObject>::cast(receiver());
+  ASSERT(info()->has_receiver() && info()->receiver()->IsHeapObject());
+  Handle<HeapObject> object = Handle<HeapObject>::cast(info()->receiver());
   Handle<Map> map(object->map());
-  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ mov(ip, Operand(map));
-  __ cmp(r3, ip);
-  __ b(ne, bailout());
+
+  EmitLoadReceiver(r1);
+  __ CheckMap(r1, r3, map, bailout(), false);
 }
 
 
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
-  // Compile global variable accesses as load IC calls.  The only live
-  // registers are cp (context) and possibly r1 (this).  Both are also saved
-  // in the stack and cp is preserved by the call.
-  __ ldr(ip, CodeGenerator::GlobalObject());
-  __ push(ip);
-  __ mov(r2, Operand(name));
-  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-  if (has_this_properties()) {
-    // Restore this.
-    EmitLoadReceiver(r1);
+void FastCodeGenerator::EmitGlobalMapCheck() {
+  Comment cmnt(masm(), ";; GlobalMapCheck");
+  if (FLAG_print_ir) {
+    PrintF(";; GlobalMapCheck()");
+  }
+
+  ASSERT(info()->has_global_object());
+  Handle<Map> map(info()->global_object()->map());
+
+  __ ldr(r3, CodeGenerator::GlobalObject());
+  __ CheckMap(r3, r3, map, bailout(), true);
+}
+
+
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> cell) {
+  ASSERT(cell->IsJSGlobalPropertyCell());
+  __ mov(r0, Operand(cell));
+  __ ldr(r0, FieldMemOperand(r0, JSGlobalPropertyCell::kValueOffset));
+  if (FLAG_debug_code) {
+    __ mov(ip, Operand(Factory::the_hole_value()));
+    __ cmp(r0, ip);
+    __ Check(ne, "DontDelete cells can't contain the hole");
   }
 }
 
 
 void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
   LookupResult lookup;
-  receiver()->Lookup(*name, &lookup);
+  info()->receiver()->Lookup(*name, &lookup);
 
-  ASSERT(lookup.holder() == *receiver());
+  ASSERT(lookup.holder() == *info()->receiver());
   ASSERT(lookup.type() == FIELD);
-  Handle<Map> map(Handle<HeapObject>::cast(receiver())->map());
+  Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
   int index = lookup.GetFieldIndex() - map->inobject_properties();
   int offset = index * kPointerSize;
 
@@ -102,11 +108,9 @@
 }
 
 
-void FastCodeGenerator::Generate(FunctionLiteral* fun, CompilationInfo* info) {
-  ASSERT(function_ == NULL);
+void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
   ASSERT(info_ == NULL);
-  function_ = fun;
-  info_ = info;
+  info_ = compilation_info;
 
   // Save the caller's frame pointer and set up our own.
   Comment prologue_cmnt(masm(), ";; Prologue");
@@ -116,9 +120,13 @@
   // this point.
 
   // Receiver (this) is allocated to r1 if there are this properties.
-  if (has_this_properties()) EmitReceiverMapCheck();
+  if (info()->has_this_properties()) EmitReceiverMapCheck();
 
-  VisitStatements(fun->body());
+  // If there is a global variable access check if the global object
+  // is the same as at lazy-compilation time.
+  if (info()->has_globals()) EmitGlobalMapCheck();
+
+  VisitStatements(function()->body());
 
   Comment return_cmnt(masm(), ";; Return(<undefined>)");
   __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
@@ -126,7 +134,7 @@
   Comment epilogue_cmnt(masm(), ";; Epilogue");
   __ mov(sp, fp);
   __ ldm(ia_w, sp, fp.bit() | lr.bit());
-  int32_t sp_delta = (fun->scope()->num_parameters() + 1) * kPointerSize;
+  int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
   __ add(sp, sp, Operand(sp_delta));
   __ Jump(lr);
 
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 9f240dd..7e048ff 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -52,12 +52,13 @@
 //
 // The function builds a JS frame.  Please see JavaScriptFrameConstants in
 // frames-arm.h for its layout.
-void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
-  function_ = fun;
-  SetFunctionPosition(fun);
+void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+  ASSERT(info_ == NULL);
+  info_ = info;
+  SetFunctionPosition(function());
 
   if (mode == PRIMARY) {
-    int locals_count = fun->scope()->num_stack_slots();
+    int locals_count = scope()->num_stack_slots();
 
     __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
     if (locals_count > 0) {
@@ -77,7 +78,7 @@
     bool function_in_register = true;
 
     // Possibly allocate a local context.
-    if (fun->scope()->num_heap_slots() > 0) {
+    if (scope()->num_heap_slots() > 0) {
       Comment cmnt(masm_, "[ Allocate local context");
       // Argument to NewContext is the function, which is in r1.
       __ push(r1);
@@ -87,9 +88,9 @@
       // passed to us.  It's saved in the stack and kept live in cp.
       __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
       // Copy any necessary parameters into the context.
-      int num_parameters = fun->scope()->num_parameters();
+      int num_parameters = scope()->num_parameters();
       for (int i = 0; i < num_parameters; i++) {
-        Slot* slot = fun->scope()->parameter(i)->slot();
+        Slot* slot = scope()->parameter(i)->slot();
         if (slot != NULL && slot->type() == Slot::CONTEXT) {
           int parameter_offset = StandardFrameConstants::kCallerSPOffset +
                                    (num_parameters - 1 - i) * kPointerSize;
@@ -107,7 +108,7 @@
       }
     }
 
-    Variable* arguments = fun->scope()->arguments()->AsVariable();
+    Variable* arguments = scope()->arguments()->AsVariable();
     if (arguments != NULL) {
       // Function uses arguments object.
       Comment cmnt(masm_, "[ Allocate arguments object");
@@ -118,9 +119,10 @@
         __ mov(r3, r1);
       }
       // Receiver is just before the parameters on the caller's stack.
-      __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset +
-                             fun->num_parameters() * kPointerSize));
-      __ mov(r1, Operand(Smi::FromInt(fun->num_parameters())));
+      int offset = scope()->num_parameters() * kPointerSize;
+      __ add(r2, fp,
+             Operand(StandardFrameConstants::kCallerSPOffset + offset));
+      __ mov(r1, Operand(Smi::FromInt(scope()->num_parameters())));
       __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
 
       // Arguments to ArgumentsAccessStub:
@@ -133,7 +135,7 @@
       __ mov(r3, r0);
       Move(arguments->slot(), r0, r1, r2);
       Slot* dot_arguments_slot =
-          fun->scope()->arguments_shadow()->AsVariable()->slot();
+          scope()->arguments_shadow()->AsVariable()->slot();
       Move(dot_arguments_slot, r3, r1, r2);
     }
   }
@@ -155,7 +157,7 @@
   }
 
   { Comment cmnt(masm_, "[ Declarations");
-    VisitDeclarations(fun->scope()->declarations());
+    VisitDeclarations(scope()->declarations());
   }
 
   if (FLAG_trace) {
@@ -164,7 +166,7 @@
 
   { Comment cmnt(masm_, "[ Body");
     ASSERT(loop_depth() == 0);
-    VisitStatements(fun->body());
+    VisitStatements(function()->body());
     ASSERT(loop_depth() == 0);
   }
 
@@ -173,7 +175,7 @@
     // body.
     __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
   }
-  EmitReturnSequence(function_->end_position());
+  EmitReturnSequence(function()->end_position());
 }
 
 
@@ -196,7 +198,7 @@
 
     // Calculate the exact length of the return sequence and make sure that
     // the constant pool is not emitted inside of the return sequence.
-    int num_parameters = function_->scope()->num_parameters();
+    int num_parameters = scope()->num_parameters();
     int32_t sp_delta = (num_parameters + 1) * kPointerSize;
     int return_sequence_length = Assembler::kJSReturnSequenceLength;
     if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
@@ -512,7 +514,7 @@
       return MemOperand(fp, SlotOffset(slot));
     case Slot::CONTEXT: {
       int context_chain_length =
-          function_->scope()->ContextChainLength(slot->var()->scope());
+          scope()->ContextChainLength(slot->var()->scope());
       __ LoadContext(scratch, context_chain_length);
       return CodeGenerator::ContextOperand(scratch, slot->index());
     }
@@ -572,7 +574,7 @@
         // this specific context.
 
         // The variable in the decl always resides in the current context.
-        ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+        ASSERT_EQ(0, scope()->ContextChainLength(var->scope()));
         if (FLAG_debug_code) {
           // Check if we have the correct context pointer.
           __ ldr(r1,
@@ -652,7 +654,7 @@
   // Call the runtime to declare the globals.
   // The context is the first argument.
   __ mov(r1, Operand(pairs));
-  __ mov(r0, Operand(Smi::FromInt(is_eval_ ? 1 : 0)));
+  __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
   __ stm(db_w, sp, cp.bit() | r1.bit() | r0.bit());
   __ CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
@@ -664,7 +666,7 @@
 
   // Build the function boilerplate and instantiate it.
   Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(expr, script_, this);
+      Compiler::BuildBoilerplate(expr, script(), this);
   if (HasStackOverflow()) return;
 
   ASSERT(boilerplate->IsBoilerplate());
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index bae1e96..19583a9 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -145,25 +145,6 @@
 }
 
 
-// Helper function used to check that a value is either not an object
-// or is loaded if it is an object.
-static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm,
-                                           Label* miss,
-                                           Register value,
-                                           Register scratch) {
-  Label done;
-  // Check if the value is a Smi.
-  __ tst(value, Operand(kSmiTagMask));
-  __ b(eq, &done);
-  // Check if the object has been loaded.
-  __ ldr(scratch, FieldMemOperand(value, JSObject::kMapOffset));
-  __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
-  __ tst(scratch, Operand(1 << Map::kNeedsLoading));
-  __ b(ne, miss);
-  __ bind(&done);
-}
-
-
 void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r2    : name
@@ -293,12 +274,6 @@
   __ CompareObjectType(r1, r0, r0, JS_FUNCTION_TYPE);
   __ b(ne, miss);
 
-  // Check that the function has been loaded.
-  __ ldr(r0, FieldMemOperand(r1, JSObject::kMapOffset));
-  __ ldrb(r0, FieldMemOperand(r0, Map::kBitField2Offset));
-  __ tst(r0, Operand(1 << Map::kNeedsLoading));
-  __ b(ne, miss);
-
   // Patch the receiver with the global proxy if necessary.
   if (is_global_object) {
     __ ldr(r2, MemOperand(sp, argc * kPointerSize));
@@ -472,7 +447,6 @@
 
   __ bind(&probe);
   GenerateDictionaryLoad(masm, &miss, r1, r0);
-  GenerateCheckNonObjectOrLoaded(masm, &miss, r0, r1);
   __ Ret();
 
   // Global object access: Check access rights.
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index b39404e..1f08c7c 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -196,7 +196,7 @@
 void MacroAssembler::LoadRoot(Register destination,
                               Heap::RootListIndex index,
                               Condition cond) {
-  ldr(destination, MemOperand(r10, index << kPointerSizeLog2), cond);
+  ldr(destination, MemOperand(roots, index << kPointerSizeLog2), cond);
 }
 
 
@@ -940,6 +940,113 @@
 }
 
 
+void MacroAssembler::AllocateTwoByteString(Register result,
+                                           Register length,
+                                           Register scratch1,
+                                           Register scratch2,
+                                           Register scratch3,
+                                           Label* gc_required) {
+  // Calculate the number of bytes needed for the characters in the string
+  // while observing object alignment.
+  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  mov(scratch1, Operand(length, LSL, 1));  // Length in bytes, not chars.
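+  // Adding kObjectAlignmentMask here makes the truncating shift below round
+  // the total size (header plus characters) up to an object-aligned size.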
+  add(scratch1, scratch1,
+      Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
+  // AllocateInNewSpace expects the size in words, so we can round down
+  // to kObjectAlignment and divide by kPointerSize in the same shift.
+  ASSERT_EQ(kPointerSize, kObjectAlignmentMask + 1);
+  mov(scratch1, Operand(scratch1, ASR, kPointerSizeLog2));
+
+  // Allocate two-byte string in new space.
+  AllocateInNewSpace(scratch1,
+                     result,
+                     scratch2,
+                     scratch3,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map, length and hash field.
+  LoadRoot(scratch1, Heap::kStringMapRootIndex);
+  str(length, FieldMemOperand(result, String::kLengthOffset));
+  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+  mov(scratch2, Operand(String::kEmptyHashField));
+  str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
+}
+
+
+void MacroAssembler::AllocateAsciiString(Register result,
+                                         Register length,
+                                         Register scratch1,
+                                         Register scratch2,
+                                         Register scratch3,
+                                         Label* gc_required) {
+  // Calculate the number of bytes needed for the characters in the string
+  // while observing object alignment.
+  ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+  ASSERT(kCharSize == 1);
+  add(scratch1, length,
+      Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
+  // AllocateInNewSpace expects the size in words, so we can round down
+  // to kObjectAlignment and divide by kPointerSize in the same shift.
+  ASSERT_EQ(kPointerSize, kObjectAlignmentMask + 1);
+  mov(scratch1, Operand(scratch1, ASR, kPointerSizeLog2));
+
+  // Allocate ASCII string in new space.
+  AllocateInNewSpace(scratch1,
+                     result,
+                     scratch2,
+                     scratch3,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map, length and hash field.
+  LoadRoot(scratch1, Heap::kAsciiStringMapRootIndex);
+  str(length, FieldMemOperand(result, String::kLengthOffset));
+  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+  mov(scratch2, Operand(String::kEmptyHashField));
+  str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
+}
+
+
+void MacroAssembler::AllocateTwoByteConsString(Register result,
+                                               Register length,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required) {
+  AllocateInNewSpace(ConsString::kSize / kPointerSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+  LoadRoot(scratch1, Heap::kConsStringMapRootIndex);
+  mov(scratch2, Operand(String::kEmptyHashField));
+  str(length, FieldMemOperand(result, String::kLengthOffset));
+  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+  str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
+}
+
+
+void MacroAssembler::AllocateAsciiConsString(Register result,
+                                             Register length,
+                                             Register scratch1,
+                                             Register scratch2,
+                                             Label* gc_required) {
+  AllocateInNewSpace(ConsString::kSize / kPointerSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+  LoadRoot(scratch1, Heap::kConsAsciiStringMapRootIndex);
+  mov(scratch2, Operand(String::kEmptyHashField));
+  str(length, FieldMemOperand(result, String::kLengthOffset));
+  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+  str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
+}
+
+
 void MacroAssembler::CompareObjectType(Register function,
                                        Register map,
                                        Register type_reg,
@@ -957,6 +1064,21 @@
 }
 
 
+void MacroAssembler::CheckMap(Register obj,
+                              Register scratch,
+                              Handle<Map> map,
+                              Label* fail,
+                              bool is_heap_object) {
+  if (!is_heap_object) {
+    BranchOnSmi(obj, fail);
+  }
+  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  mov(ip, Operand(map));
+  cmp(scratch, ip);
+  b(ne, fail);
+}
+
+
 void MacroAssembler::TryGetFunctionPrototype(Register function,
                                              Register result,
                                              Register scratch,
@@ -1010,10 +1132,17 @@
 }
 
 
+void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
+  ASSERT(allow_stub_calls());  // stub calls are not allowed in some stubs
+  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
+}
+
+
 void MacroAssembler::StubReturn(int argc) {
   ASSERT(argc >= 1 && generating_stub());
-  if (argc > 1)
+  if (argc > 1) {
     add(sp, sp, Operand((argc - 1) * kPointerSize));
+  }
   Ret();
 }
 
@@ -1037,6 +1166,18 @@
 }
 
 
+void MacroAssembler::GetLeastBitsFromSmi(Register dst,
+                                         Register src,
+                                         int num_least_bits) {
+  if (CpuFeatures::IsSupported(ARMv7)) {
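+    // Extract num_least_bits bits starting just above the smi tag. The ubfx
+    // operands are the raw lsb and width-1 instruction fields, matching the
+    // widthminus1 field decoded in the simulator.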
+    ubfx(dst, src, Operand(kSmiTagSize), Operand(num_least_bits - 1));
+  } else {
+    mov(dst, Operand(src, ASR, kSmiTagSize));
+    and_(dst, dst, Operand((1 << num_least_bits) - 1));
+  }
+}
+
+
 void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
   // All parameters are on the stack.  r0 has the return value after call.
 
@@ -1238,6 +1379,26 @@
 }
 
 
+void MacroAssembler::JumpIfNotBothSmi(Register reg1,
+                                      Register reg2,
+                                      Label* on_not_both_smi) {
+  ASSERT_EQ(0, kSmiTag);
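+  // A smi has a zero tag, so tst yields eq for a smi. The second tst only
+  // executes when reg1 is a smi, so ne afterwards means at least one of the
+  // registers holds a non-smi.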
+  tst(reg1, Operand(kSmiTagMask));
+  tst(reg2, Operand(kSmiTagMask), eq);
+  b(ne, on_not_both_smi);
+}
+
+
+void MacroAssembler::JumpIfEitherSmi(Register reg1,
+                                     Register reg2,
+                                     Label* on_either_smi) {
+  ASSERT_EQ(0, kSmiTag);
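+  // The second tst only executes when reg1 is not a smi, so eq afterwards
+  // means at least one of the registers holds a smi.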
+  tst(reg1, Operand(kSmiTagMask));
+  tst(reg2, Operand(kSmiTagMask), ne);
+  b(eq, on_either_smi);
+}
+
+
 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
     Register first,
     Register second,
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index efc5bfa..66ef4f9 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -33,10 +33,18 @@
 namespace v8 {
 namespace internal {
 
+// ----------------------------------------------------------------------------
+// Static helper functions
+
+// Generate a MemOperand for loading a field from an object.
+static inline MemOperand FieldMemOperand(Register object, int offset) {
+  return MemOperand(object, offset - kHeapObjectTag);
+}
+
 
 // Give alias names to registers
 const Register cp = { 8 };  // JavaScript context pointer
-
+const Register roots = { 10 };  // Roots array pointer.
 
 enum InvokeJSFlags {
   CALL_JS,
@@ -49,14 +57,7 @@
  public:
   MacroAssembler(void* buffer, int size);
 
-  // ---------------------------------------------------------------------------
-  // Low-level helpers for compiler
-
-  // Jump, Call, and Ret pseudo instructions implementing inter-working
- private:
-  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
-  void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
- public:
+  // Jump, Call, and Ret pseudo instructions implementing inter-working.
   void Jump(Register target, Condition cond = al);
   void Jump(byte* target, RelocInfo::Mode rmode, Condition cond = al);
   void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
@@ -209,6 +210,31 @@
   // allocation is undone.
   void UndoAllocationInNewSpace(Register object, Register scratch);
 
+
+  void AllocateTwoByteString(Register result,
+                             Register length,
+                             Register scratch1,
+                             Register scratch2,
+                             Register scratch3,
+                             Label* gc_required);
+  void AllocateAsciiString(Register result,
+                           Register length,
+                           Register scratch1,
+                           Register scratch2,
+                           Register scratch3,
+                           Label* gc_required);
+  void AllocateTwoByteConsString(Register result,
+                                 Register length,
+                                 Register scratch1,
+                                 Register scratch2,
+                                 Label* gc_required);
+  void AllocateAsciiConsString(Register result,
+                               Register length,
+                               Register scratch1,
+                               Register scratch2,
+                               Label* gc_required);
+
+
   // ---------------------------------------------------------------------------
   // Support functions.
 
@@ -243,6 +269,29 @@
                            Register type_reg,
                            InstanceType type);
 
+
+  // Check if the map of an object is equal to a specified map and
+  // branch to label if not. Skip the smi check if not required
+  // (object is known to be a heap object)
+  void CheckMap(Register obj,
+                Register scratch,
+                Handle<Map> map,
+                Label* fail,
+                bool is_heap_object);
+
+  // Load and check the instance type of an object for being a string.
+  // Loads the type into the second argument register.
+  // Returns a condition that is satisfied if the object was a string.
+  Condition IsObjectStringType(Register obj,
+                               Register type) {
+    ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset));
+    ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
+    tst(type, Operand(kIsNotStringMask));
+    ASSERT_EQ(0, kStringTag);
+    return eq;
+  }
+
+
   inline void BranchOnSmi(Register value, Label* smi_label) {
     tst(value, Operand(kSmiTagMask));
     b(eq, smi_label);
@@ -257,6 +306,9 @@
   // occurred.
   void IllegalOperation(int num_arguments);
 
+  // Get the given number of least significant bits from a smi value.
+  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
+
   // Uses VFP instructions to Convert a Smi to a double.
   void IntegerToDoubleConversionWithVFP3(Register inReg,
                                          Register outHighReg,
@@ -269,6 +321,9 @@
   // Call a code stub.
   void CallStub(CodeStub* stub, Condition cond = al);
 
+  // Call a code stub.
+  void TailCallStub(CodeStub* stub, Condition cond = al);
+
   // Return from a code stub after popping its arguments.
   void StubReturn(int argc);
 
@@ -338,6 +393,14 @@
   bool allow_stub_calls() { return allow_stub_calls_; }
 
   // ---------------------------------------------------------------------------
+  // Smi utilities
+
+  // Jump if either of the registers contains a non-smi.
+  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
+  // Jump if either of the registers contains a smi.
+  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
+
+  // ---------------------------------------------------------------------------
   // String utilities
 
   // Checks if both objects are sequential ASCII strings and jumps to label
@@ -357,11 +420,8 @@
                                            Label* not_flat_ascii_strings);
 
  private:
-  List<Unresolved> unresolved_;
-  bool generating_stub_;
-  bool allow_stub_calls_;
-  Handle<Object> code_object_;  // This handle will be patched with the code
-                                // object on installation.
+  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
+  void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
 
   // Helper functions for generating invokes.
   void InvokePrologue(const ParameterCount& expected,
@@ -386,6 +446,12 @@
   // Activation support.
   void EnterFrame(StackFrame::Type type);
   void LeaveFrame(StackFrame::Type type);
+
+  List<Unresolved> unresolved_;
+  bool generating_stub_;
+  bool allow_stub_calls_;
+  // This handle will be patched with the code object on installation.
+  Handle<Object> code_object_;
 };
 
 
@@ -421,12 +487,6 @@
 // -----------------------------------------------------------------------------
 // Static helper functions.
 
-// Generate a MemOperand for loading a field from an object.
-static inline MemOperand FieldMemOperand(Register object, int offset) {
-  return MemOperand(object, offset - kHeapObjectTag);
-}
-
-
 #ifdef GENERATED_CODE_COVERAGE
 #define CODE_COVERAGE_STRINGIFY(x) #x
 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index f543151..cee5aea 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -1741,7 +1741,7 @@
 
 
 void Simulator::DecodeType3(Instr* instr) {
-  ASSERT(instr->Bit(4) == 0);
+  ASSERT(instr->Bits(6, 4) == 0x5 || instr->Bit(4) == 0);
   int rd = instr->RdField();
   int rn = instr->RnField();
   int32_t rn_val = get_register(rn);
@@ -1768,10 +1768,26 @@
       break;
     }
     case 3: {
-      // Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
-      addr = rn_val + shifter_operand;
-      if (instr->HasW()) {
-        set_register(rn, addr);
+      if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
+        uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
+        uint32_t lsbit = static_cast<uint32_t>(instr->ShiftAmountField());
+        uint32_t msbit = widthminus1 + lsbit;
+        if (msbit <= 31) {
+          uint32_t rm_val =
+              static_cast<uint32_t>(get_register(instr->RmField()));
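+          // Emulate ubfx: shift the bit field up against bit 31 to discard
+          // the bits above it, then shift it back down to bit 0. The right
+          // shift is logical because extr_val is unsigned.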
+          uint32_t extr_val = rm_val << (31 - msbit);
+          extr_val = extr_val >> (31 - widthminus1);
+          set_register(instr->RdField(), extr_val);
+        } else {
+          UNREACHABLE();
+        }
+        return;
+      } else {
+        // Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
+        addr = rn_val + shifter_operand;
+        if (instr->HasW()) {
+          set_register(rn, addr);
+        }
       }
       break;
     }
@@ -1785,7 +1801,8 @@
       uint8_t byte = ReadB(addr);
       set_register(rd, byte);
     } else {
-      UNIMPLEMENTED();
+      uint8_t byte = get_register(rd);
+      WriteB(addr, byte);
     }
   } else {
     if (instr->HasL()) {
diff --git a/src/assembler.h b/src/assembler.h
index ec47d57..942ce47 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -506,8 +506,10 @@
   return -(1 << (n-1)) <= x && x < (1 << (n-1));
 }
 
-static inline bool is_int24(int x)  { return is_intn(x, 24); }
 static inline bool is_int8(int x)  { return is_intn(x, 8); }
+static inline bool is_int16(int x)  { return is_intn(x, 16); }
+static inline bool is_int18(int x)  { return is_intn(x, 18); }
+static inline bool is_int24(int x)  { return is_intn(x, 24); }
 
 static inline bool is_uintn(int x, int n) {
   return (x & -(1 << n)) == 0;
@@ -519,9 +521,20 @@
 static inline bool is_uint5(int x)  { return is_uintn(x, 5); }
 static inline bool is_uint6(int x)  { return is_uintn(x, 6); }
 static inline bool is_uint8(int x)  { return is_uintn(x, 8); }
+static inline bool is_uint10(int x)  { return is_uintn(x, 10); }
 static inline bool is_uint12(int x)  { return is_uintn(x, 12); }
 static inline bool is_uint16(int x)  { return is_uintn(x, 16); }
 static inline bool is_uint24(int x)  { return is_uintn(x, 24); }
+static inline bool is_uint26(int x)  { return is_uintn(x, 26); }
+static inline bool is_uint28(int x)  { return is_uintn(x, 28); }
+
+static inline int NumberOfBitsSet(uint32_t x) {
+  unsigned int num_bits_set;
+  for (num_bits_set = 0; x; x >>= 1) {
+    num_bits_set += x & 1;
+  }
+  return num_bits_set;
+}
 
 } }  // namespace v8::internal
 
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 78d0995..62edae5 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -91,7 +91,6 @@
   DISALLOW_COPY_AND_ASSIGN(SourceCodeCache);
 };
 
-static SourceCodeCache natives_cache(Script::TYPE_NATIVE);
 static SourceCodeCache extensions_cache(Script::TYPE_EXTENSION);
 // This is for delete, not delete[].
 static List<char*>* delete_these_non_arrays_on_tear_down = NULL;
@@ -134,20 +133,7 @@
 }
 
 
-bool Bootstrapper::NativesCacheLookup(Vector<const char> name,
-                                      Handle<JSFunction>* handle) {
-  return natives_cache.Lookup(name, handle);
-}
-
-
-void Bootstrapper::NativesCacheAdd(Vector<const char> name,
-                                   Handle<JSFunction> fun) {
-  natives_cache.Add(name, fun);
-}
-
-
 void Bootstrapper::Initialize(bool create_heap_objects) {
-  natives_cache.Initialize(create_heap_objects);
   extensions_cache.Initialize(create_heap_objects);
 }
 
@@ -187,8 +173,7 @@
     delete_these_arrays_on_tear_down = NULL;
   }
 
-  natives_cache.Initialize(false);  // Yes, symmetrical
-  extensions_cache.Initialize(false);
+  extensions_cache.Initialize(false);  // Yes, symmetrical
 }
 
 
@@ -307,17 +292,11 @@
   Genesis(Handle<Object> global_object,
           v8::Handle<v8::ObjectTemplate> global_template,
           v8::ExtensionConfiguration* extensions);
-  ~Genesis();
+  ~Genesis() { }
 
   Handle<Context> result() { return result_; }
 
   Genesis* previous() { return previous_; }
-  static Genesis* current() { return current_; }
-
-  // Support for thread preemption.
-  static int ArchiveSpacePerThread();
-  static char* ArchiveState(char* to);
-  static char* RestoreState(char* from);
 
  private:
   Handle<Context> global_context_;
@@ -326,21 +305,48 @@
   // triggered during environment creation there may be weak handle
   // processing callbacks which may create new environments.
   Genesis* previous_;
-  static Genesis* current_;
 
   Handle<Context> global_context() { return global_context_; }
 
-  void CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
-                   Handle<Object> global_object);
+  // Creates some basic objects. Used for creating a context from scratch.
+  void CreateRoots();
+  // Creates the empty function.  Used for creating a context from scratch.
+  Handle<JSFunction> CreateEmptyFunction();
+  // Creates the global objects using the global and the template passed in
+  // through the API.  We call this regardless of whether we are building a
+  // context from scratch or using a deserialized one from the partial snapshot
+  // but in the latter case we don't use the objects it produces directly, as
+  // we have to use the deserialized ones that are linked together with the
+  // rest of the context snapshot.
+  Handle<JSGlobalProxy> CreateNewGlobals(
+      v8::Handle<v8::ObjectTemplate> global_template,
+      Handle<Object> global_object,
+      Handle<GlobalObject>* global_proxy_out);
+  // Hooks the given global proxy into the context.  If the context was created
+  // by deserialization then this will unhook the global proxy that was
+  // deserialized, leaving the GC to pick it up.
+  void HookUpGlobalProxy(Handle<GlobalObject> inner_global,
+                         Handle<JSGlobalProxy> global_proxy);
+  // New context initialization.  Used for creating a context from scratch.
+  void InitializeGlobal(Handle<GlobalObject> inner_global,
+                        Handle<JSFunction> empty_function);
+  // Installs the contents of the native .js files on the global objects.
+  // Used for creating a context from scratch.
   void InstallNativeFunctions();
   bool InstallNatives();
-  bool InstallExtensions(v8::ExtensionConfiguration* extensions);
-  bool InstallExtension(const char* name);
-  bool InstallExtension(v8::RegisteredExtension* current);
-  bool InstallSpecialObjects();
+  // Used both for deserialized and from-scratch contexts to add the extensions
+  // provided.
+  static bool InstallExtensions(Handle<Context> global_context,
+                                v8::ExtensionConfiguration* extensions);
+  static bool InstallExtension(const char* name);
+  static bool InstallExtension(v8::RegisteredExtension* current);
+  static void InstallSpecialObjects(Handle<Context> global_context);
   bool ConfigureApiObject(Handle<JSObject> object,
                           Handle<ObjectTemplateInfo> object_template);
   bool ConfigureGlobalObjects(v8::Handle<v8::ObjectTemplate> global_template);
+  void TransferMapsToDeserializedGlobals(
+    Handle<GlobalObject> inner_global_outside_snapshot,
+    Handle<GlobalObject> inner_global_from_snapshot);
 
   // Migrates all properties from the 'from' object to the 'to'
   // object and overrides the prototype in 'to' with the one from
@@ -366,17 +372,17 @@
                                   Handle<String> source,
                                   SourceCodeCache* cache,
                                   v8::Extension* extension,
+                                  Handle<Context> top_context,
                                   bool use_runtime_context);
 
   Handle<Context> result_;
+  Handle<JSFunction> empty_function_;
+  BootstrapperActive active_;
+  friend class Bootstrapper;
 };
 
-Genesis* Genesis::current_ = NULL;
-
 
 void Bootstrapper::Iterate(ObjectVisitor* v) {
-  natives_cache.Iterate(v);
-  v->Synchronize("NativesCache");
   extensions_cache.Iterate(v);
   v->Synchronize("Extensions");
   PendingFixups::Iterate(v);
@@ -391,17 +397,20 @@
 }
 
 
-bool Bootstrapper::IsActive() {
-  return Genesis::current() != NULL;
-}
-
-
 Handle<Context> Bootstrapper::CreateEnvironment(
     Handle<Object> global_object,
     v8::Handle<v8::ObjectTemplate> global_template,
     v8::ExtensionConfiguration* extensions) {
+  HandleScope scope;
+  Handle<Context> env;
   Genesis genesis(global_object, global_template, extensions);
-  return genesis.result();
+  env = genesis.result();
+  if (!env.is_null()) {
+    if (InstallExtensions(env, extensions)) {
+      return env;
+    }
+  }
+  return Handle<Context>();
 }
 
 
@@ -423,12 +432,6 @@
 }
 
 
-Genesis::~Genesis() {
-  ASSERT(current_ == this);
-  current_ = previous_;
-}
-
-
 static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
                                           const char* name,
                                           InstanceType type,
@@ -508,22 +511,7 @@
 }
 
 
-void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
-                          Handle<Object> global_object) {
-  HandleScope scope;
-  // Allocate the global context FixedArray first and then patch the
-  // closure and extension object later (we need the empty function
-  // and the global object, but in order to create those, we need the
-  // global context).
-  global_context_ =
-      Handle<Context>::cast(
-          GlobalHandles::Create(*Factory::NewGlobalContext()));
-  Top::set_context(*global_context());
-
-  // Allocate the message listeners object.
-  v8::NeanderArray listeners;
-  global_context()->set_message_listeners(*listeners.value());
-
+Handle<JSFunction> Genesis::CreateEmptyFunction() {
   // Allocate the map for function instances.
   Handle<Map> fm = Factory::NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
   global_context()->set_function_instance_map(*fm);
@@ -567,138 +555,174 @@
   Handle<JSFunction> empty_function =
       Factory::NewFunction(symbol, Factory::null_value());
 
-  {  // --- E m p t y ---
-    Handle<Code> code =
-        Handle<Code>(Builtins::builtin(Builtins::EmptyFunction));
-    empty_function->set_code(*code);
-    Handle<String> source = Factory::NewStringFromAscii(CStrVector("() {}"));
-    Handle<Script> script = Factory::NewScript(source);
-    script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
-    empty_function->shared()->set_script(*script);
-    empty_function->shared()->set_start_position(0);
-    empty_function->shared()->set_end_position(source->length());
-    empty_function->shared()->DontAdaptArguments();
-    global_context()->function_map()->set_prototype(*empty_function);
-    global_context()->function_instance_map()->set_prototype(*empty_function);
+  // --- E m p t y ---
+  Handle<Code> code =
+      Handle<Code>(Builtins::builtin(Builtins::EmptyFunction));
+  empty_function->set_code(*code);
+  Handle<String> source = Factory::NewStringFromAscii(CStrVector("() {}"));
+  Handle<Script> script = Factory::NewScript(source);
+  script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
+  empty_function->shared()->set_script(*script);
+  empty_function->shared()->set_start_position(0);
+  empty_function->shared()->set_end_position(source->length());
+  empty_function->shared()->DontAdaptArguments();
+  global_context()->function_map()->set_prototype(*empty_function);
+  global_context()->function_instance_map()->set_prototype(*empty_function);
 
-    // Allocate the function map first and then patch the prototype later
-    Handle<Map> empty_fm = Factory::CopyMapDropDescriptors(fm);
-    empty_fm->set_instance_descriptors(*function_map_descriptors);
-    empty_fm->set_prototype(global_context()->object_function()->prototype());
-    empty_function->set_map(*empty_fm);
+  // Allocate the function map first and then patch the prototype later
+  Handle<Map> empty_fm = Factory::CopyMapDropDescriptors(fm);
+  empty_fm->set_instance_descriptors(*function_map_descriptors);
+  empty_fm->set_prototype(global_context()->object_function()->prototype());
+  empty_function->set_map(*empty_fm);
+  return empty_function;
+}
+
+
+void Genesis::CreateRoots() {
+  // Allocate the global context FixedArray first and then patch the
+  // closure and extension object later (we need the empty function
+  // and the global object, but in order to create those, we need the
+  // global context).
+  global_context_ =
+      Handle<Context>::cast(
+          GlobalHandles::Create(*Factory::NewGlobalContext()));
+  Top::set_context(*global_context());
+
+  // Allocate the message listeners object.
+  {
+    v8::NeanderArray listeners;
+    global_context()->set_message_listeners(*listeners.value());
+  }
+}
+
+
+Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
+    v8::Handle<v8::ObjectTemplate> global_template,
+    Handle<Object> global_object,
+    Handle<GlobalObject>* inner_global_out) {
+  // The argument global_template aka data is an ObjectTemplateInfo.
+  // It has a constructor pointer that points at global_constructor which is a
+  // FunctionTemplateInfo.
+  // The global_constructor is used to create or reinitialize the global_proxy.
+  // The global_constructor also has a prototype_template pointer that points at
+  // js_global_template which is an ObjectTemplateInfo.
+  // That in turn has a constructor pointer that points at
+  // js_global_constructor which is a FunctionTemplateInfo.
+  // js_global_constructor is used to make js_global_function.
+  // js_global_function is used to make the new inner_global.
+  //
+  // --- G l o b a l ---
+  // Step 1: Create a fresh inner JSGlobalObject.
+  Handle<JSFunction> js_global_function;
+  Handle<ObjectTemplateInfo> js_global_template;
+  if (!global_template.IsEmpty()) {
+    // Get prototype template of the global_template.
+    Handle<ObjectTemplateInfo> data =
+        v8::Utils::OpenHandle(*global_template);
+    Handle<FunctionTemplateInfo> global_constructor =
+        Handle<FunctionTemplateInfo>(
+            FunctionTemplateInfo::cast(data->constructor()));
+    Handle<Object> proto_template(global_constructor->prototype_template());
+    if (!proto_template->IsUndefined()) {
+      js_global_template =
+          Handle<ObjectTemplateInfo>::cast(proto_template);
+    }
   }
 
-  {  // --- G l o b a l ---
-    // Step 1: create a fresh inner JSGlobalObject
-    Handle<GlobalObject> object;
-    {
-      Handle<JSFunction> js_global_function;
-      Handle<ObjectTemplateInfo> js_global_template;
-      if (!global_template.IsEmpty()) {
-        // Get prototype template of the global_template
-        Handle<ObjectTemplateInfo> data =
-            v8::Utils::OpenHandle(*global_template);
-        Handle<FunctionTemplateInfo> global_constructor =
-            Handle<FunctionTemplateInfo>(
-                FunctionTemplateInfo::cast(data->constructor()));
-        Handle<Object> proto_template(global_constructor->prototype_template());
-        if (!proto_template->IsUndefined()) {
-          js_global_template =
-              Handle<ObjectTemplateInfo>::cast(proto_template);
-        }
-      }
-
-      if (js_global_template.is_null()) {
-        Handle<String> name = Handle<String>(Heap::empty_symbol());
-        Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
-        js_global_function =
-            Factory::NewFunction(name, JS_GLOBAL_OBJECT_TYPE,
-                                 JSGlobalObject::kSize, code, true);
-        // Change the constructor property of the prototype of the
-        // hidden global function to refer to the Object function.
-        Handle<JSObject> prototype =
-            Handle<JSObject>(
-                JSObject::cast(js_global_function->instance_prototype()));
-        SetProperty(prototype, Factory::constructor_symbol(),
-                    Top::object_function(), NONE);
-      } else {
-        Handle<FunctionTemplateInfo> js_global_constructor(
-            FunctionTemplateInfo::cast(js_global_template->constructor()));
-        js_global_function =
-            Factory::CreateApiFunction(js_global_constructor,
-                                       Factory::InnerGlobalObject);
-      }
-
-      js_global_function->initial_map()->set_is_hidden_prototype();
-      object = Factory::NewGlobalObject(js_global_function);
-    }
-
-    // Set the global context for the global object.
-    object->set_global_context(*global_context());
-
-    // Step 2: create or re-initialize the global proxy object.
-    Handle<JSGlobalProxy> global_proxy;
-    {
-      Handle<JSFunction> global_proxy_function;
-      if (global_template.IsEmpty()) {
-        Handle<String> name = Handle<String>(Heap::empty_symbol());
-        Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
-        global_proxy_function =
-            Factory::NewFunction(name, JS_GLOBAL_PROXY_TYPE,
-                                 JSGlobalProxy::kSize, code, true);
-      } else {
-        Handle<ObjectTemplateInfo> data =
-            v8::Utils::OpenHandle(*global_template);
-        Handle<FunctionTemplateInfo> global_constructor(
-                FunctionTemplateInfo::cast(data->constructor()));
-        global_proxy_function =
-            Factory::CreateApiFunction(global_constructor,
-                                       Factory::OuterGlobalObject);
-      }
-
-      Handle<String> global_name = Factory::LookupAsciiSymbol("global");
-      global_proxy_function->shared()->set_instance_class_name(*global_name);
-      global_proxy_function->initial_map()->set_is_access_check_needed(true);
-
-      // Set global_proxy.__proto__ to js_global after ConfigureGlobalObjects
-
-      if (global_object.location() != NULL) {
-        ASSERT(global_object->IsJSGlobalProxy());
-        global_proxy =
-            ReinitializeJSGlobalProxy(
-                global_proxy_function,
-                Handle<JSGlobalProxy>::cast(global_object));
-      } else {
-        global_proxy = Handle<JSGlobalProxy>::cast(
-            Factory::NewJSObject(global_proxy_function, TENURED));
-      }
-
-      // Security setup: Set the security token of the global object to
-      // its the inner global. This makes the security check between two
-      // different contexts fail by default even in case of global
-      // object reinitialization.
-      object->set_global_receiver(*global_proxy);
-      global_proxy->set_context(*global_context());
-    }
-
-    {  // --- G l o b a l   C o n t e x t ---
-      // use the empty function as closure (no scope info)
-      global_context()->set_closure(*empty_function);
-      global_context()->set_fcontext(*global_context());
-      global_context()->set_previous(NULL);
-
-      // set extension and global object
-      global_context()->set_extension(*object);
-      global_context()->set_global(*object);
-      global_context()->set_global_proxy(*global_proxy);
-      // use inner global object as security token by default
-      global_context()->set_security_token(*object);
-    }
-
-    Handle<JSObject> global = Handle<JSObject>(global_context()->global());
-    SetProperty(global, object_name, Top::object_function(), DONT_ENUM);
+  if (js_global_template.is_null()) {
+    Handle<String> name = Handle<String>(Heap::empty_symbol());
+    Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+    js_global_function =
+        Factory::NewFunction(name, JS_GLOBAL_OBJECT_TYPE,
+                             JSGlobalObject::kSize + 17 * kPointerSize,
+                             code, true);
+    // Change the constructor property of the prototype of the
+    // hidden global function to refer to the Object function.
+    Handle<JSObject> prototype =
+        Handle<JSObject>(
+            JSObject::cast(js_global_function->instance_prototype()));
+    SetProperty(prototype, Factory::constructor_symbol(),
+                Top::object_function(), NONE);
+  } else {
+    Handle<FunctionTemplateInfo> js_global_constructor(
+        FunctionTemplateInfo::cast(js_global_template->constructor()));
+    js_global_function =
+        Factory::CreateApiFunction(js_global_constructor,
+                                   Factory::InnerGlobalObject);
   }
 
+  js_global_function->initial_map()->set_is_hidden_prototype();
+  Handle<GlobalObject> inner_global =
+      Factory::NewGlobalObject(js_global_function);
+  if (inner_global_out != NULL) {
+    *inner_global_out = inner_global;
+  }
+
+  // Step 2: create or re-initialize the global proxy object.
+  Handle<JSFunction> global_proxy_function;
+  if (global_template.IsEmpty()) {
+    Handle<String> name = Handle<String>(Heap::empty_symbol());
+    Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+    global_proxy_function =
+        Factory::NewFunction(name, JS_GLOBAL_PROXY_TYPE,
+                             JSGlobalProxy::kSize, code, true);
+  } else {
+    Handle<ObjectTemplateInfo> data =
+        v8::Utils::OpenHandle(*global_template);
+    Handle<FunctionTemplateInfo> global_constructor(
+            FunctionTemplateInfo::cast(data->constructor()));
+    global_proxy_function =
+        Factory::CreateApiFunction(global_constructor,
+                                   Factory::OuterGlobalObject);
+  }
+
+  Handle<String> global_name = Factory::LookupAsciiSymbol("global");
+  global_proxy_function->shared()->set_instance_class_name(*global_name);
+  global_proxy_function->initial_map()->set_is_access_check_needed(true);
+
+  // Set global_proxy.__proto__ to js_global after ConfigureGlobalObjects.
+  // Return the global proxy.
+
+  if (global_object.location() != NULL) {
+    ASSERT(global_object->IsJSGlobalProxy());
+    return ReinitializeJSGlobalProxy(
+        global_proxy_function,
+        Handle<JSGlobalProxy>::cast(global_object));
+  } else {
+    return Handle<JSGlobalProxy>::cast(
+        Factory::NewJSObject(global_proxy_function, TENURED));
+  }
+}
+
+
+void Genesis::HookUpGlobalProxy(Handle<GlobalObject> inner_global,
+                                Handle<JSGlobalProxy> global_proxy) {
+  // Set the global context for the global object.
+  inner_global->set_global_context(*global_context());
+  inner_global->set_global_receiver(*global_proxy);
+  global_proxy->set_context(*global_context());
+  global_context()->set_global_proxy(*global_proxy);
+}
+
+
+void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
+                               Handle<JSFunction> empty_function) {
+  // --- G l o b a l   C o n t e x t ---
+  // Use the empty function as closure (no scope info).
+  global_context()->set_closure(*empty_function);
+  global_context()->set_fcontext(*global_context());
+  global_context()->set_previous(NULL);
+  // Set extension and global object.
+  global_context()->set_extension(*inner_global);
+  global_context()->set_global(*inner_global);
+  // Security setup: Set the security token of the global object to
+  // the inner global. This makes the security check between two
+  // different contexts fail by default even in case of global
+  // object reinitialization.
+  global_context()->set_security_token(*inner_global);
+
+  Handle<String> object_name = Handle<String>(Heap::Object_symbol());
+  SetProperty(inner_global, object_name, Top::object_function(), DONT_ENUM);
+
   Handle<JSObject> global = Handle<JSObject>(global_context()->global());
 
   // Install global Function object
@@ -917,8 +941,12 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
   Debugger::set_compiling_natives(true);
 #endif
-  bool result =
-      CompileScriptCached(name, source, &natives_cache, NULL, true);
+  bool result = CompileScriptCached(name,
+                                    source,
+                                    NULL,
+                                    NULL,
+                                    Handle<Context>(Top::context()),
+                                    true);
   ASSERT(Top::has_pending_exception() != result);
   if (!result) Top::clear_pending_exception();
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -932,29 +960,36 @@
                                   Handle<String> source,
                                   SourceCodeCache* cache,
                                   v8::Extension* extension,
+                                  Handle<Context> top_context,
                                   bool use_runtime_context) {
   HandleScope scope;
   Handle<JSFunction> boilerplate;
 
   // If we can't find the function in the cache, we compile a new
   // function and insert it into the cache.
-  if (!cache->Lookup(name, &boilerplate)) {
+  if (cache == NULL || !cache->Lookup(name, &boilerplate)) {
     ASSERT(source->IsAsciiRepresentation());
     Handle<String> script_name = Factory::NewStringFromUtf8(name);
-    boilerplate =
-        Compiler::Compile(source, script_name, 0, 0, extension, NULL);
+    boilerplate = Compiler::Compile(
+        source,
+        script_name,
+        0,
+        0,
+        extension,
+        NULL,
+        use_runtime_context ? NATIVES_CODE : NOT_NATIVES_CODE);
     if (boilerplate.is_null()) return false;
-    cache->Add(name, boilerplate);
+    if (cache != NULL) cache->Add(name, boilerplate);
   }
 
   // Setup the function context. Conceptually, we should clone the
   // function before overwriting the context but since we're in a
   // single-threaded environment it is not strictly necessary.
-  ASSERT(Top::context()->IsGlobalContext());
+  ASSERT(top_context->IsGlobalContext());
   Handle<Context> context =
       Handle<Context>(use_runtime_context
-                      ? Top::context()->runtime_context()
-                      : Top::context());
+                      ? Handle<Context>(top_context->runtime_context())
+                      : top_context);
   Handle<JSFunction> fun =
       Factory::NewFunctionFromBoilerplate(boilerplate, context);
 
@@ -962,14 +997,14 @@
   // object as the receiver. Provide no parameters.
   Handle<Object> receiver =
       Handle<Object>(use_runtime_context
-                     ? Top::context()->builtins()
-                     : Top::context()->global());
+                     ? top_context->builtins()
+                     : top_context->global());
   bool has_pending_exception;
   Handle<Object> result =
       Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
   if (has_pending_exception) return false;
   return PendingFixups::Process(
-      Handle<JSBuiltinsObject>(Top::context()->builtins()));
+      Handle<JSBuiltinsObject>(top_context->builtins()));
 }
 
 
@@ -1167,45 +1202,15 @@
     // Allocate the empty script.
     Handle<Script> script = Factory::NewScript(Factory::empty_string());
     script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
-    global_context()->set_empty_script(*script);
+    Heap::public_set_empty_script(*script);
   }
 
-  if (FLAG_natives_file == NULL) {
-    // Without natives file, install default natives.
-    for (int i = Natives::GetDelayCount();
-         i < Natives::GetBuiltinsCount();
-         i++) {
-      if (!CompileBuiltin(i)) return false;
-    }
-
-    // Setup natives with lazy loading.
-    SetupLazy(Handle<JSFunction>(global_context()->date_function()),
-              Natives::GetIndex("date"),
-              Top::global_context(),
-              Handle<Context>(Top::context()->runtime_context()));
-    SetupLazy(Handle<JSFunction>(global_context()->regexp_function()),
-              Natives::GetIndex("regexp"),
-              Top::global_context(),
-              Handle<Context>(Top::context()->runtime_context()));
-    SetupLazy(Handle<JSObject>(global_context()->json_object()),
-              Natives::GetIndex("json"),
-              Top::global_context(),
-              Handle<Context>(Top::context()->runtime_context()));
-
-  } else if (strlen(FLAG_natives_file) != 0) {
-    // Otherwise install natives from natives file if file exists and
-    // compiles.
-    bool exists;
-    Vector<const char> source = ReadFile(FLAG_natives_file, &exists);
-    Handle<String> source_string = Factory::NewStringFromAscii(source);
-    if (source.is_empty()) return false;
-    bool result = CompileNative(CStrVector(FLAG_natives_file), source_string);
-    if (!result) return false;
-
-  } else {
-    // Empty natives file name - do not install any natives.
-    PrintF("Warning: Running without installed natives!\n");
-    return true;
+  // Install natives.
+  for (int i = Natives::GetDebuggerCount();
+       i < Natives::GetBuiltinsCount();
+       i++) {
+    Vector<const char> name = Natives::GetScriptName(i);
+    if (!CompileBuiltin(i)) return false;
   }
 
   InstallNativeFunctions();
@@ -1246,14 +1251,29 @@
 #ifdef DEBUG
   builtins->Verify();
 #endif
+
   return true;
 }
 
 
-bool Genesis::InstallSpecialObjects() {
+int BootstrapperActive::nesting_ = 0;
+
+
+bool Bootstrapper::InstallExtensions(Handle<Context> global_context,
+                                     v8::ExtensionConfiguration* extensions) {
+  BootstrapperActive active;
+  SaveContext saved_context;
+  Top::set_context(*global_context);
+  if (!Genesis::InstallExtensions(global_context, extensions)) return false;
+  Genesis::InstallSpecialObjects(global_context);
+  return true;
+}
+
+
+void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
   HandleScope scope;
   Handle<JSGlobalObject> js_global(
-      JSGlobalObject::cast(global_context()->global()));
+      JSGlobalObject::cast(global_context->global()));
   // Expose the natives in global if a name for it is specified.
   if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
     Handle<String> natives_string =
@@ -1276,13 +1296,12 @@
   if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
     // If loading fails we just bail out without installing the
     // debugger but without tanking the whole context.
-    if (!Debug::Load())
-      return true;
+    if (!Debug::Load()) return;
     // Set the security token for the debugger context to the same as
     // the shell global context to allow calling between these (otherwise
     // exposing debug global object doesn't make much sense).
     Debug::debug_context()->set_security_token(
-        global_context()->security_token());
+        global_context->security_token());
 
     Handle<String> debug_string =
         Factory::LookupAsciiSymbol(FLAG_expose_debug_as);
@@ -1290,19 +1309,18 @@
         Handle<Object>(Debug::debug_context()->global_proxy()), DONT_ENUM);
   }
 #endif
-
-  return true;
 }
 
 
-bool Genesis::InstallExtensions(v8::ExtensionConfiguration* extensions) {
+bool Genesis::InstallExtensions(Handle<Context> global_context,
+                                v8::ExtensionConfiguration* extensions) {
   // Clear coloring of extension list
   v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
   while (current != NULL) {
     current->set_state(v8::UNVISITED);
     current = current->next();
   }
-  // Install auto extensions
+  // Install auto extensions.
   current = v8::RegisteredExtension::first_extension();
   while (current != NULL) {
     if (current->extension()->auto_enable())
@@ -1366,7 +1384,9 @@
   Handle<String> source_code = Factory::NewStringFromAscii(source);
   bool result = CompileScriptCached(CStrVector(extension->name()),
                                     source_code,
-                                    &extensions_cache, extension,
+                                    &extensions_cache,
+                                    extension,
+                                    Handle<Context>(Top::context()),
                                     false);
   ASSERT(Top::has_pending_exception() != result);
   if (!result) {
@@ -1377,6 +1397,20 @@
 }
 
 
+void Genesis::TransferMapsToDeserializedGlobals(
+    Handle<GlobalObject> inner_global_outside_snapshot,
+    Handle<GlobalObject> inner_global_from_snapshot) {
+  Handle<Map> from_map(inner_global_outside_snapshot->map());
+#ifdef DEBUG
+  Handle<Map> to_map(inner_global_from_snapshot->map());
+  ASSERT_EQ(to_map->instance_size(), from_map->instance_size());
+  ASSERT_EQ(0, to_map->inobject_properties());
+  ASSERT_EQ(0, from_map->inobject_properties());
+#endif
+  inner_global_from_snapshot->set_map(*from_map);
+}
+
+
 bool Genesis::ConfigureGlobalObjects(
     v8::Handle<v8::ObjectTemplate> global_proxy_template) {
   Handle<JSObject> global_proxy(
@@ -1453,15 +1487,13 @@
           // If the property is already there we skip it
           if (result.IsValid()) continue;
           HandleScope inner;
-          Handle<DescriptorArray> inst_descs =
-              Handle<DescriptorArray>(to->map()->instance_descriptors());
+          ASSERT(!to->HasFastProperties());
+          // Add to dictionary.
           Handle<String> key = Handle<String>(descs->GetKey(i));
-          Handle<Object> entry = Handle<Object>(descs->GetCallbacksObject(i));
-          inst_descs = Factory::CopyAppendProxyDescriptor(inst_descs,
-                                                          key,
-                                                          entry,
-                                                          details.attributes());
-          to->map()->set_instance_descriptors(*inst_descs);
+          Handle<Object> callbacks(descs->GetCallbacksObject(i));
+          PropertyDetails d =
+              PropertyDetails(details.attributes(), CALLBACKS, details.index());
+          SetNormalizedProperty(to, key, callbacks, d);
           break;
         }
         case MAP_TRANSITION:
@@ -1597,33 +1629,56 @@
 Genesis::Genesis(Handle<Object> global_object,
                  v8::Handle<v8::ObjectTemplate> global_template,
                  v8::ExtensionConfiguration* extensions) {
-  // Link this genesis object into the stacked genesis chain. This
-  // must be done before any early exits because the destructor
-  // will always do unlinking.
-  previous_ = current_;
-  current_  = this;
   result_ = Handle<Context>::null();
-
   // If V8 isn't running and cannot be initialized, just return.
   if (!V8::IsRunning() && !V8::Initialize(NULL)) return;
 
   // Before creating the roots we must save the context and restore it
   // on all function exits.
   HandleScope scope;
-  SaveContext context;
+  SaveContext saved_context;
 
-  CreateRoots(global_template, global_object);
+  Handle<Context> new_context = Snapshot::NewContextFromSnapshot();
+  if (!new_context.is_null()) {
+    global_context_ =
+        Handle<Context>::cast(GlobalHandles::Create(*new_context));
+    Top::set_context(*global_context_);
+    Counters::contexts_created_by_snapshot.Increment();
+    result_ = global_context_;
+    JSFunction* empty_function =
+        JSFunction::cast(result_->function_map()->prototype());
+    empty_function_ = Handle<JSFunction>(empty_function);
+    Handle<GlobalObject> inner_global_outside_snapshot;
+    Handle<JSGlobalProxy> global_proxy =
+        CreateNewGlobals(global_template,
+                         global_object,
+                         &inner_global_outside_snapshot);
+    // CreateNewGlobals returns an inner global that it just made, but
+    // we won't give that to HookUpGlobalProxy because we want to hook
+    // up the global proxy to the one from the snapshot.
+    Handle<GlobalObject> inner_global(
+        GlobalObject::cast(global_context_->extension()));
+    HookUpGlobalProxy(inner_global, global_proxy);
+    TransferMapsToDeserializedGlobals(inner_global_outside_snapshot,
+                                      inner_global);
+    if (!ConfigureGlobalObjects(global_template)) return;
+  } else {
+    // We get here if there was no context snapshot.
+    CreateRoots();
+    Handle<JSFunction> empty_function = CreateEmptyFunction();
+    Handle<GlobalObject> inner_global;
+    Handle<JSGlobalProxy> global_proxy =
+        CreateNewGlobals(global_template, global_object, &inner_global);
+    HookUpGlobalProxy(inner_global, global_proxy);
+    InitializeGlobal(inner_global, empty_function);
+    if (!InstallNatives()) return;
 
-  if (!InstallNatives()) return;
+    MakeFunctionInstancePrototypeWritable();
+    BuildSpecialFunctionTable();
 
-  MakeFunctionInstancePrototypeWritable();
-  BuildSpecialFunctionTable();
-
-  if (!ConfigureGlobalObjects(global_template)) return;
-
-  if (!InstallExtensions(extensions)) return;
-
-  if (!InstallSpecialObjects()) return;
+    if (!ConfigureGlobalObjects(global_template)) return;
+    Counters::contexts_created_from_scratch.Increment();
+  }
 
   result_ = global_context_;
 }
@@ -1633,46 +1688,46 @@
 
 // Reserve space for statics needing saving and restoring.
 int Bootstrapper::ArchiveSpacePerThread() {
-  return Genesis::ArchiveSpacePerThread();
+  return BootstrapperActive::ArchiveSpacePerThread();
 }
 
 
 // Archive statics that are thread local.
 char* Bootstrapper::ArchiveState(char* to) {
-  return Genesis::ArchiveState(to);
+  return BootstrapperActive::ArchiveState(to);
 }
 
 
 // Restore statics that are thread local.
 char* Bootstrapper::RestoreState(char* from) {
-  return Genesis::RestoreState(from);
+  return BootstrapperActive::RestoreState(from);
 }
 
 
 // Called when the top-level V8 mutex is destroyed.
 void Bootstrapper::FreeThreadResources() {
-  ASSERT(Genesis::current() == NULL);
+  ASSERT(!BootstrapperActive::IsActive());
 }
 
 
 // Reserve space for statics needing saving and restoring.
-int Genesis::ArchiveSpacePerThread() {
-  return sizeof(current_);
+int BootstrapperActive::ArchiveSpacePerThread() {
+  return sizeof(nesting_);
 }
 
 
 // Archive statics that are thread local.
-char* Genesis::ArchiveState(char* to) {
-  *reinterpret_cast<Genesis**>(to) = current_;
-  current_ = NULL;
-  return to + sizeof(current_);
+char* BootstrapperActive::ArchiveState(char* to) {
+  *reinterpret_cast<int*>(to) = nesting_;
+  nesting_ = 0;
+  return to + sizeof(nesting_);
 }
 
 
 // Restore statics that are thread local.
-char* Genesis::RestoreState(char* from) {
-  current_ = *reinterpret_cast<Genesis**>(from);
-  return from + sizeof(current_);
+char* BootstrapperActive::RestoreState(char* from) {
+  nesting_ = *reinterpret_cast<int*>(from);
+  return from + sizeof(nesting_);
 }
 
 } }  // namespace v8::internal
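
For context, the intended effect is transparent to embedders: v8::Context::New() still
drives Genesis, but when a context snapshot is present the constructor above takes the
deserialization branch (counted by contexts_created_by_snapshot) instead of compiling
the natives from source. A minimal embedder-side sketch against the v8.h API of this
era; the script text is just a placeholder:

    #include <v8.h>

    int main() {
      v8::HandleScope scope;
      // Ends up in Genesis::Genesis(): with a context snapshot the new
      // deserialization path runs, otherwise the context is bootstrapped
      // from scratch and the natives are compiled as before.
      v8::Persistent<v8::Context> context = v8::Context::New();
      v8::Context::Scope context_scope(context);
      v8::Handle<v8::Script> script =
          v8::Script::Compile(v8::String::New("6 * 7"));
      int answer = script->Run()->Int32Value();
      context.Dispose();
      return answer == 42 ? 0 : 1;
    }
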
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
index 7cd3a2b..19fc39a 100644
--- a/src/bootstrapper.h
+++ b/src/bootstrapper.h
@@ -32,6 +32,24 @@
 namespace v8 {
 namespace internal {
 
+
+class BootstrapperActive BASE_EMBEDDED {
+ public:
+  BootstrapperActive() { nesting_++; }
+  ~BootstrapperActive() { nesting_--; }
+
+  // Support for thread preemption.
+  static int ArchiveSpacePerThread();
+  static char* ArchiveState(char* to);
+  static char* RestoreState(char* from);
+
+ private:
+  static bool IsActive() { return nesting_ != 0; }
+  static int nesting_;
+  friend class Bootstrapper;
+};
+
+
 // The Boostrapper is the public interface for creating a JavaScript global
 // context.
 class Bootstrapper : public AllStatic {
@@ -55,15 +73,12 @@
 
   // Accessors for the native scripts cache. Used in lazy loading.
   static Handle<String> NativesSourceLookup(int index);
-  static bool NativesCacheLookup(Vector<const char> name,
-                                 Handle<JSFunction>* handle);
-  static void NativesCacheAdd(Vector<const char> name, Handle<JSFunction> fun);
 
   // Append code that needs fixup at the end of boot strapping.
   static void AddFixup(Code* code, MacroAssembler* masm);
 
   // Tells whether bootstrapping is active.
-  static bool IsActive();
+  static bool IsActive() { return BootstrapperActive::IsActive(); }
 
   // Encoding/decoding support for fixup flags.
   class FixupFlagsUseCodeObject: public BitField<bool, 0, 1> {};
@@ -78,6 +93,10 @@
   // This will allocate a char array that is deleted when V8 is shut down.
   // It should only be used for strictly finite allocations.
   static char* AllocateAutoDeletedArray(int bytes);
+
+  // Used for new context creation.
+  static bool InstallExtensions(Handle<Context> global_context,
+                                v8::ExtensionConfiguration* extensions);
 };
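
For illustration, a minimal sketch of how the BootstrapperActive guard is meant to be
used: it is a re-entrant counter, so nested bootstrap phases (for example the new
Bootstrapper::InstallExtensions above, which is now called separately after a context
has been created) keep IsActive() true for their whole extent. The function name is a
placeholder:

    void SomeBootstrapPhase() {      // hypothetical caller inside v8::internal
      BootstrapperActive outer;      // nesting_: 0 -> 1
      ASSERT(Bootstrapper::IsActive());
      {
        BootstrapperActive inner;    // re-entrant: 1 -> 2
        // ... compile natives, install extensions, ...
      }                              // back to 1
    }                                // back to 0; IsActive() is false again
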
 
 
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 09581aa..95f0760 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -149,13 +149,16 @@
 }
 
 
-const char* CodeStub::MajorName(CodeStub::Major major_key) {
+const char* CodeStub::MajorName(CodeStub::Major major_key,
+                                bool allow_unknown_keys) {
   switch (major_key) {
 #define DEF_CASE(name) case name: return #name;
     CODE_STUB_LIST(DEF_CASE)
 #undef DEF_CASE
     default:
-      UNREACHABLE();
+      if (!allow_unknown_keys) {
+        UNREACHABLE();
+      }
       return NULL;
   }
 }
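
A sketch of what the new argument buys: code that inspects Code objects whose stub keys
it may not recognize (for example when looking at a heap rebuilt from a snapshot) can
ask for a best-effort name instead of hitting UNREACHABLE(). The key value is a
placeholder and MajorKeyFromKey is assumed from code-stubs.h:

    uint32_t key = 0;  // placeholder: normally taken from a Code object
    CodeStub::Major major =
        static_cast<CodeStub::Major>(CodeStub::MajorKeyFromKey(key));
    const char* name = CodeStub::MajorName(major, true);  // tolerate unknown keys
    if (name == NULL) name = "<unknown code stub>";
    // CodeStub::GetName() keeps the strict behaviour by passing false.
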
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 16267f6..d502f14 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -100,7 +100,7 @@
   static int MinorKeyFromKey(uint32_t key) {
     return MinorKeyBits::decode(key);
   };
-  static const char* MajorName(Major major_key);
+  static const char* MajorName(Major major_key, bool allow_unknown_keys);
 
   virtual ~CodeStub() {}
 
@@ -138,7 +138,7 @@
   virtual InLoopFlag InLoop() { return NOT_IN_LOOP; }
 
   // Returns a name for logging/debugging purposes.
-  virtual const char* GetName() { return MajorName(MajorKey()); }
+  virtual const char* GetName() { return MajorName(MajorKey(), false); }
 
 #ifdef DEBUG
   virtual void Print() { PrintF("%s\n", GetName()); }
diff --git a/src/codegen-inl.h b/src/codegen-inl.h
index bee237d..da8cbf7 100644
--- a/src/codegen-inl.h
+++ b/src/codegen-inl.h
@@ -30,6 +30,7 @@
 #define V8_CODEGEN_INL_H_
 
 #include "codegen.h"
+#include "compiler.h"
 #include "register-allocator-inl.h"
 
 #if V8_TARGET_ARCH_IA32
@@ -38,6 +39,8 @@
 #include "x64/codegen-x64-inl.h"
 #elif V8_TARGET_ARCH_ARM
 #include "arm/codegen-arm-inl.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/codegen-mips-inl.h"
 #else
 #error Unsupported target architecture.
 #endif
@@ -46,42 +49,8 @@
 namespace v8 {
 namespace internal {
 
-#define __ ACCESS_MASM(masm_)
-
-// -----------------------------------------------------------------------------
-// Support for "structured" code comments.
-//
-// By selecting matching brackets in disassembler output,
-// code segments can be identified more easily.
-
-#ifdef DEBUG
-
-class Comment BASE_EMBEDDED {
- public:
-  Comment(MacroAssembler* masm, const char* msg) : masm_(masm), msg_(msg) {
-    __ RecordComment(msg);
-  }
-
-  ~Comment() {
-    if (msg_[0] == '[') __ RecordComment("]");
-  }
-
- private:
-  MacroAssembler* masm_;
-  const char* msg_;
-};
-
-#else
-
-class Comment BASE_EMBEDDED {
- public:
-  Comment(MacroAssembler*, const char*)  {}
-};
-
-#endif  // DEBUG
-
-#undef __
-
+Handle<Script> CodeGenerator::script() { return info_->script(); }
+bool CodeGenerator::is_eval() { return info_->is_eval(); }
 
 } }  // namespace v8::internal
 
diff --git a/src/codegen.cc b/src/codegen.cc
index 8822edd..24eb476 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -42,6 +42,24 @@
 namespace v8 {
 namespace internal {
 
+#define __ ACCESS_MASM(masm_)
+
+#ifdef DEBUG
+
+Comment::Comment(MacroAssembler* masm, const char* msg)
+    : masm_(masm), msg_(msg) {
+  __ RecordComment(msg);
+}
+
+
+Comment::~Comment() {
+  if (msg_[0] == '[') __ RecordComment("]");
+}
+
+#endif  // DEBUG
+
+#undef __
+
 
 CodeGenerator* CodeGeneratorScope::top_ = NULL;
 
@@ -126,7 +144,7 @@
 }
 
 
-void CodeGenerator::MakeCodePrologue(FunctionLiteral* fun) {
+void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
 #ifdef DEBUG
   bool print_source = false;
   bool print_ast = false;
@@ -147,34 +165,35 @@
 
   if (FLAG_trace_codegen || print_source || print_ast) {
     PrintF("*** Generate code for %s function: ", ftype);
-    fun->name()->ShortPrint();
+    info->function()->name()->ShortPrint();
     PrintF(" ***\n");
   }
 
   if (print_source) {
-    PrintF("--- Source from AST ---\n%s\n", PrettyPrinter().PrintProgram(fun));
+    PrintF("--- Source from AST ---\n%s\n",
+           PrettyPrinter().PrintProgram(info->function()));
   }
 
   if (print_ast) {
-    PrintF("--- AST ---\n%s\n", AstPrinter().PrintProgram(fun));
+    PrintF("--- AST ---\n%s\n",
+           AstPrinter().PrintProgram(info->function()));
   }
 
   if (print_json_ast) {
     JsonAstBuilder builder;
-    PrintF("%s", builder.BuildProgram(fun));
+    PrintF("%s", builder.BuildProgram(info->function()));
   }
 #endif  // DEBUG
 }
 
 
-Handle<Code> CodeGenerator::MakeCodeEpilogue(FunctionLiteral* fun,
-                                             MacroAssembler* masm,
+Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
                                              Code::Flags flags,
-                                             Handle<Script> script) {
+                                             CompilationInfo* info) {
   // Allocate and install the code.
   CodeDesc desc;
   masm->GetCode(&desc);
-  ZoneScopeInfo sinfo(fun->scope());
+  ZoneScopeInfo sinfo(info->scope());
   Handle<Code> code =
       Factory::NewCode(desc, &sinfo, flags, masm->CodeObject());
 
@@ -187,20 +206,23 @@
       : FLAG_print_code;
   if (print_code) {
     // Print the source code if available.
+    Handle<Script> script = info->script();
+    FunctionLiteral* function = info->function();
     if (!script->IsUndefined() && !script->source()->IsUndefined()) {
       PrintF("--- Raw source ---\n");
       StringInputBuffer stream(String::cast(script->source()));
-      stream.Seek(fun->start_position());
+      stream.Seek(function->start_position());
       // fun->end_position() points to the last character in the stream. We
       // need to compensate by adding one to calculate the length.
-      int source_len = fun->end_position() - fun->start_position() + 1;
+      int source_len =
+          function->end_position() - function->start_position() + 1;
       for (int i = 0; i < source_len; i++) {
         if (stream.has_more()) PrintF("%c", stream.GetNext());
       }
       PrintF("\n\n");
     }
     PrintF("--- Code ---\n");
-    code->Disassemble(*fun->name()->ToCString());
+    code->Disassemble(*function->name()->ToCString());
   }
 #endif  // ENABLE_DISASSEMBLER
 
@@ -214,21 +236,19 @@
 // Generate the code. Takes a function literal, generates code for it, assemble
 // all the pieces into a Code object. This function is only to be called by
 // the compiler.cc code.
-Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* fun,
-                                     Handle<Script> script,
-                                     bool is_eval,
-                                     CompilationInfo* info) {
+Handle<Code> CodeGenerator::MakeCode(CompilationInfo* info) {
+  Handle<Script> script = info->script();
   if (!script->IsUndefined() && !script->source()->IsUndefined()) {
     int len = String::cast(script->source())->length();
     Counters::total_old_codegen_source_size.Increment(len);
   }
-  MakeCodePrologue(fun);
+  MakeCodePrologue(info);
   // Generate code.
   const int kInitialBufferSize = 4 * KB;
   MacroAssembler masm(NULL, kInitialBufferSize);
-  CodeGenerator cgen(&masm, script, is_eval);
+  CodeGenerator cgen(&masm);
   CodeGeneratorScope scope(&cgen);
-  cgen.Generate(fun, PRIMARY, info);
+  cgen.Generate(info, PRIMARY);
   if (cgen.HasStackOverflow()) {
     ASSERT(!Top::has_pending_exception());
     return Handle<Code>::null();
@@ -236,7 +256,7 @@
 
   InLoopFlag in_loop = (cgen.loop_nesting() != 0) ? IN_LOOP : NOT_IN_LOOP;
   Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
-  return MakeCodeEpilogue(fun, cgen.masm(), flags, script);
+  return MakeCodeEpilogue(cgen.masm(), flags, info);
 }
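
The Comment helper itself is unchanged in behaviour, only moved: its declaration now
lives in codegen.h and the DEBUG definitions here in codegen.cc. A sketch of its
typical use inside a code generator; the visitor and node names are placeholders:

    void CodeGenerator::VisitExampleNode(ExampleNode* node) {  // hypothetical
      Comment cmnt(masm_, "[ ExampleNode");
      // ... emit code for the node ...
      // In DEBUG builds the constructor records the "[ ..." comment and the
      // destructor records a matching "]", so brackets pair up in the
      // disassembly.
    }
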
 
 
diff --git a/src/codegen.h b/src/codegen.h
index d0be5f1..3afa041 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -86,6 +86,8 @@
 #include "x64/codegen-x64.h"
 #elif V8_TARGET_ARCH_ARM
 #include "arm/codegen-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/codegen-mips.h"
 #else
 #error Unsupported target architecture.
 #endif
@@ -96,6 +98,29 @@
 namespace internal {
 
 
+// Support for "structured" code comments.
+#ifdef DEBUG
+
+class Comment BASE_EMBEDDED {
+ public:
+  Comment(MacroAssembler* masm, const char* msg);
+  ~Comment();
+
+ private:
+  MacroAssembler* masm_;
+  const char* msg_;
+};
+
+#else
+
+class Comment BASE_EMBEDDED {
+ public:
+  Comment(MacroAssembler*, const char*)  {}
+};
+
+#endif  // DEBUG
+
+
 // Code generation can be nested.  Code generation scopes form a stack
 // of active code generators.
 class CodeGeneratorScope BASE_EMBEDDED {
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index 5427367..d049d26 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -28,6 +28,7 @@
 #include "v8.h"
 
 #include "compilation-cache.h"
+#include "serialize.h"
 
 namespace v8 {
 namespace internal {
@@ -37,19 +38,12 @@
 static const int kSubCacheCount = 4;
 
 // The number of generations for each sub cache.
-#if defined(ANDROID)
-static const int kScriptGenerations = 1;
-static const int kEvalGlobalGenerations = 1;
-static const int kEvalContextualGenerations = 1;
-static const int kRegExpGenerations = 1;
-#else
 // The number of ScriptGenerations is carefully chosen based on histograms.
 // See issue 458: http://code.google.com/p/v8/issues/detail?id=458
 static const int kScriptGenerations = 5;
 static const int kEvalGlobalGenerations = 2;
 static const int kEvalContextualGenerations = 2;
 static const int kRegExpGenerations = 2;
-#endif
 
 // Initial size of each compilation cache table allocated.
 static const int kInitialCacheSize = 64;
diff --git a/src/compiler.cc b/src/compiler.cc
index a5e1e5c..6556d37 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -43,15 +43,11 @@
 namespace internal {
 
 
-static Handle<Code> MakeCode(FunctionLiteral* literal,
-                             Handle<Script> script,
-                             Handle<Context> context,
-                             bool is_eval,
-                             CompilationInfo* info) {
-  ASSERT(literal != NULL);
-
+static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
+  FunctionLiteral* function = info->function();
+  ASSERT(function != NULL);
   // Rewrite the AST by introducing .result assignments where needed.
-  if (!Rewriter::Process(literal) || !AnalyzeVariableUsage(literal)) {
+  if (!Rewriter::Process(function) || !AnalyzeVariableUsage(function)) {
     // Signal a stack overflow by returning a null handle.  The stack
     // overflow exception will be thrown by the caller.
     return Handle<Code>::null();
@@ -62,7 +58,7 @@
     // the top scope only contains the single lazily compiled function,
     // so this doesn't re-allocate variables repeatedly.
     HistogramTimerScope timer(&Counters::variable_allocation);
-    Scope* top = literal->scope();
+    Scope* top = info->scope();
     while (top->outer_scope() != NULL) top = top->outer_scope();
     top->AllocateVariables(context);
   }
@@ -71,12 +67,12 @@
   if (Bootstrapper::IsActive() ?
       FLAG_print_builtin_scopes :
       FLAG_print_scopes) {
-    literal->scope()->Print();
+    info->scope()->Print();
   }
 #endif
 
   // Optimize the AST.
-  if (!Rewriter::Optimize(literal)) {
+  if (!Rewriter::Optimize(function)) {
     // Signal a stack overflow by returning a null handle.  The stack
     // overflow exception will be thrown by the caller.
     return Handle<Code>::null();
@@ -98,25 +94,25 @@
 
   Handle<SharedFunctionInfo> shared = info->shared_info();
   bool is_run_once = (shared.is_null())
-      ? literal->scope()->is_global_scope()
+      ? info->scope()->is_global_scope()
       : (shared->is_toplevel() || shared->try_full_codegen());
 
   if (FLAG_always_full_compiler || (FLAG_full_compiler && is_run_once)) {
     FullCodeGenSyntaxChecker checker;
-    checker.Check(literal);
+    checker.Check(function);
     if (checker.has_supported_syntax()) {
-      return FullCodeGenerator::MakeCode(literal, script, is_eval);
+      return FullCodeGenerator::MakeCode(info);
     }
   } else if (FLAG_always_fast_compiler ||
              (FLAG_fast_compiler && !is_run_once)) {
     FastCodeGenSyntaxChecker checker;
-    checker.Check(literal, info);
+    checker.Check(info);
     if (checker.has_supported_syntax()) {
-      return FastCodeGenerator::MakeCode(literal, script, is_eval, info);
+      return FastCodeGenerator::MakeCode(info);
     }
   }
 
-  return CodeGenerator::MakeCode(literal, script, is_eval, info);
+  return CodeGenerator::MakeCode(info);
 }
 
 
@@ -180,10 +176,8 @@
   HistogramTimerScope timer(rate);
 
   // Compile the code.
-  CompilationInfo info(Handle<SharedFunctionInfo>::null(),
-                       Handle<Object>::null(),  // No receiver.
-                       0);  // Not nested in a loop.
-  Handle<Code> code = MakeCode(lit, script, context, is_eval, &info);
+  CompilationInfo info(lit, script, is_eval);
+  Handle<Code> code = MakeCode(context, &info);
 
   // Check for stack-overflow exceptions.
   if (code.is_null()) {
@@ -243,7 +237,8 @@
                                      Handle<Object> script_name,
                                      int line_offset, int column_offset,
                                      v8::Extension* extension,
-                                     ScriptDataImpl* input_pre_data) {
+                                     ScriptDataImpl* input_pre_data,
+                                     NativesFlag natives) {
   int source_length = source->length();
   Counters::total_load_size.Increment(source_length);
   Counters::total_compile_size.Increment(source_length);
@@ -271,6 +266,9 @@
 
     // Create a script object describing the script to be compiled.
     Handle<Script> script = Factory::NewScript(source);
+    if (natives == NATIVES_CODE) {
+      script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
+    }
     if (!script_name.is_null()) {
       script->set_name(*script_name);
       script->set_line_offset(Smi::FromInt(line_offset));
@@ -355,7 +353,6 @@
   // Compute name, source code and script data.
   Handle<SharedFunctionInfo> shared = info->shared_info();
   Handle<String> name(String::cast(shared->name()));
-  Handle<Script> script(Script::cast(shared->script()));
 
   int start_position = shared->start_position();
   int end_position = shared->end_position();
@@ -364,7 +361,8 @@
 
   // Generate the AST for the lazily compiled function. The AST may be
   // NULL in case of parser stack overflow.
-  FunctionLiteral* lit = MakeLazyAST(script, name,
+  FunctionLiteral* lit = MakeLazyAST(info->script(),
+                                     name,
                                      start_position,
                                      end_position,
                                      is_expression);
@@ -374,6 +372,7 @@
     ASSERT(Top::has_pending_exception());
     return false;
   }
+  info->set_function(lit);
 
   // Measure how long it takes to do the lazy compilation; only take
   // the rest of the function into account to avoid overlap with the
@@ -381,11 +380,7 @@
   HistogramTimerScope timer(&Counters::compile_lazy);
 
   // Compile the code.
-  Handle<Code> code = MakeCode(lit,
-                               script,
-                               Handle<Context>::null(),
-                               false,
-                               info);
+  Handle<Code> code = MakeCode(Handle<Context>::null(), info);
 
   // Check for stack-overflow exception.
   if (code.is_null()) {
@@ -394,28 +389,12 @@
   }
 
 #if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
-  // Log the code generation. If source information is available include script
-  // name and line number. Check explicit whether logging is enabled as finding
-  // the line number is not for free.
-  if (Logger::is_logging() || OProfileAgent::is_enabled()) {
-    Handle<String> func_name(name->length() > 0 ?
-                             *name : shared->inferred_name());
-    if (script->name()->IsString()) {
-      int line_num = GetScriptLineNumber(script, start_position) + 1;
-      LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, *code, *func_name,
-                          String::cast(script->name()), line_num));
-      OProfileAgent::CreateNativeCodeRegion(*func_name,
-                                            String::cast(script->name()),
-                                            line_num,
-                                            code->instruction_start(),
-                                            code->instruction_size());
-    } else {
-      LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, *code, *func_name));
-      OProfileAgent::CreateNativeCodeRegion(*func_name,
-                                            code->instruction_start(),
-                                            code->instruction_size());
-    }
-  }
+  LogCodeCreateEvent(Logger::LAZY_COMPILE_TAG,
+                     name,
+                     Handle<String>(shared->inferred_name()),
+                     start_position,
+                     info->script(),
+                     code);
 #endif
 
   // Update the shared function info with the compiled code.
@@ -466,9 +445,7 @@
     // Generate code and return it.  The way that the compilation mode
     // is controlled by the command-line flags is described in
     // the static helper function MakeCode.
-    CompilationInfo info(Handle<SharedFunctionInfo>::null(),
-                         Handle<Object>::null(),  // No receiver.
-                         0);  // Not nested in a loop.
+    CompilationInfo info(literal, script, false);
 
     CHECK(!FLAG_always_full_compiler || !FLAG_always_fast_compiler);
     bool is_run_once = literal->try_full_codegen();
@@ -477,9 +454,7 @@
       FullCodeGenSyntaxChecker checker;
       checker.Check(literal);
       if (checker.has_supported_syntax()) {
-        code = FullCodeGenerator::MakeCode(literal,
-                                           script,
-                                           false);  // Not eval.
+        code = FullCodeGenerator::MakeCode(&info);
         is_compiled = true;
       }
     } else if (FLAG_always_fast_compiler ||
@@ -487,19 +462,16 @@
       // Since we are not lazily compiling we do not have a receiver to
       // specialize for.
       FastCodeGenSyntaxChecker checker;
-      checker.Check(literal, &info);
+      checker.Check(&info);
       if (checker.has_supported_syntax()) {
-        code = FastCodeGenerator::MakeCode(literal, script, false, &info);
+        code = FastCodeGenerator::MakeCode(&info);
         is_compiled = true;
       }
     }
 
     if (!is_compiled) {
       // We fall back to the classic V8 code generator.
-      code = CodeGenerator::MakeCode(literal,
-                                     script,
-                                     false,  // Not eval.
-                                     &info);
+      code = CodeGenerator::MakeCode(&info);
     }
 
     // Check for stack-overflow exception.
@@ -509,12 +481,14 @@
     }
 
     // Function compilation complete.
-    LOG(CodeCreateEvent(Logger::FUNCTION_TAG, *code, *literal->name()));
 
-#ifdef ENABLE_OPROFILE_AGENT
-    OProfileAgent::CreateNativeCodeRegion(*literal->name(),
-                                          code->instruction_start(),
-                                          code->instruction_size());
+#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
+    LogCodeCreateEvent(Logger::FUNCTION_TAG,
+                       literal->name(),
+                       literal->inferred_name(),
+                       literal->start_position(),
+                       script,
+                       code);
 #endif
   }
 
@@ -562,4 +536,35 @@
 }
 
 
+#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
+void Compiler::LogCodeCreateEvent(Logger::LogEventsAndTags tag,
+                                  Handle<String> name,
+                                  Handle<String> inferred_name,
+                                  int start_position,
+                                  Handle<Script> script,
+                                  Handle<Code> code) {
+  // Log the code generation. If source information is available
+  // include script name and line number. Check explicitly whether
+  // logging is enabled as finding the line number is not free.
+  if (Logger::is_logging() || OProfileAgent::is_enabled()) {
+    Handle<String> func_name(name->length() > 0 ? *name : *inferred_name);
+    if (script->name()->IsString()) {
+      int line_num = GetScriptLineNumber(script, start_position) + 1;
+      LOG(CodeCreateEvent(tag, *code, *func_name,
+                          String::cast(script->name()), line_num));
+      OProfileAgent::CreateNativeCodeRegion(*func_name,
+                                            String::cast(script->name()),
+                                            line_num,
+                                            code->instruction_start(),
+                                            code->instruction_size());
+    } else {
+      LOG(CodeCreateEvent(tag, *code, *func_name));
+      OProfileAgent::CreateNativeCodeRegion(*func_name,
+                                            code->instruction_start(),
+                                            code->instruction_size());
+    }
+  }
+}
+#endif
+
 } }  // namespace v8::internal
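
A sketch of a call through the widened Compiler::Compile signature; source and
script_name are placeholder handles a caller would already have:

    Handle<JSFunction> boilerplate =
        Compiler::Compile(source,       // Handle<String> with the script text
                          script_name,  // Handle<Object> name, may be null
                          0, 0,         // line and column offsets
                          NULL,         // no v8::Extension
                          NULL,         // no pre-parse data
                          NOT_NATIVES_CODE);
    // The bootstrapper passes NATIVES_CODE instead, which makes the new code
    // above mark the Script object as Script::TYPE_NATIVE.
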
diff --git a/src/compiler.h b/src/compiler.h
index 19499de..88f4479 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -35,38 +35,127 @@
 namespace v8 {
 namespace internal {
 
-// CompilationInfo encapsulates some information known at compile time.
+// CompilationInfo encapsulates some information known at compile time.  It
+// is constructed based on the resources available at compile time.
 class CompilationInfo BASE_EMBEDDED {
  public:
-  CompilationInfo(Handle<SharedFunctionInfo> shared_info,
-                  Handle<Object> receiver,
-                  int loop_nesting)
-      : shared_info_(shared_info),
-        receiver_(receiver),
+  // Lazy compilation of a JSFunction.
+  CompilationInfo(Handle<JSFunction> closure,
+                  int loop_nesting,
+                  Handle<Object> receiver)
+      : closure_(closure),
+        function_(NULL),
+        is_eval_(false),
         loop_nesting_(loop_nesting),
-        has_this_properties_(false),
-        has_globals_(false) {
+        receiver_(receiver) {
+    Initialize();
+    ASSERT(!closure_.is_null() &&
+           shared_info_.is_null() &&
+           script_.is_null());
   }
 
-  Handle<SharedFunctionInfo> shared_info() { return shared_info_; }
+  // Lazy compilation based on SharedFunctionInfo.
+  explicit CompilationInfo(Handle<SharedFunctionInfo> shared_info)
+      : shared_info_(shared_info),
+        function_(NULL),
+        is_eval_(false),
+        loop_nesting_(0) {
+    Initialize();
+    ASSERT(closure_.is_null() &&
+           !shared_info_.is_null() &&
+           script_.is_null());
+  }
 
+  // Eager compilation.
+  CompilationInfo(FunctionLiteral* literal, Handle<Script> script, bool is_eval)
+      : script_(script),
+        function_(literal),
+        is_eval_(is_eval),
+        loop_nesting_(0) {
+    Initialize();
+    ASSERT(closure_.is_null() &&
+           shared_info_.is_null() &&
+           !script_.is_null());
+  }
+
+  // We can only get a JSFunction if we actually have one.
+  Handle<JSFunction> closure() { return closure_; }
+
+  // We can get a SharedFunctionInfo either from the JSFunction, if we
+  // have one, or because we were given one directly.
+  Handle<SharedFunctionInfo> shared_info() {
+    if (!closure().is_null()) {
+      return Handle<SharedFunctionInfo>(closure()->shared());
+    } else {
+      return shared_info_;
+    }
+  }
+
+  // We can always get a script.  Either we have one or we can get a shared
+  // function info.
+  Handle<Script> script() {
+    if (!script_.is_null()) {
+      return script_;
+    } else {
+      ASSERT(shared_info()->script()->IsScript());
+      return Handle<Script>(Script::cast(shared_info()->script()));
+    }
+  }
+
+  // There should always be a function literal, but it may be set after
+  // construction (for lazy compilation).
+  FunctionLiteral* function() { return function_; }
+  void set_function(FunctionLiteral* literal) {
+    ASSERT(function_ == NULL);
+    function_ = literal;
+  }
+
+  // Simple accessors.
+  bool is_eval() { return is_eval_; }
+  int loop_nesting() { return loop_nesting_; }
   bool has_receiver() { return !receiver_.is_null(); }
   Handle<Object> receiver() { return receiver_; }
 
-  int loop_nesting() { return loop_nesting_; }
-
+  // Accessors for mutable fields, which analysis passes may set; their
+  // default values are given by Initialize.
   bool has_this_properties() { return has_this_properties_; }
   void set_has_this_properties(bool flag) { has_this_properties_ = flag; }
 
+  bool has_global_object() {
+    return !closure().is_null() && (closure()->context()->global() != NULL);
+  }
+
+  GlobalObject* global_object() {
+    return has_global_object() ? closure()->context()->global() : NULL;
+  }
+
   bool has_globals() { return has_globals_; }
   void set_has_globals(bool flag) { has_globals_ = flag; }
 
+  // Derived accessors.
+  Scope* scope() { return function()->scope(); }
+
  private:
+  void Initialize() {
+    has_this_properties_ = false;
+    has_globals_ = false;
+  }
+
+  Handle<JSFunction> closure_;
   Handle<SharedFunctionInfo> shared_info_;
-  Handle<Object> receiver_;
+  Handle<Script> script_;
+
+  FunctionLiteral* function_;
+
+  bool is_eval_;
   int loop_nesting_;
+
+  Handle<Object> receiver_;
+
   bool has_this_properties_;
   bool has_globals_;
+
+  DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
 };
 
 
@@ -94,7 +183,8 @@
                                     Handle<Object> script_name,
                                     int line_offset, int column_offset,
                                     v8::Extension* extension,
-                                    ScriptDataImpl* script_Data);
+                                    ScriptDataImpl* script_Data,
+                                    NativesFlag is_natives_code);
 
   // Compile a String source within a context for Eval.
   static Handle<JSFunction> CompileEval(Handle<String> source,
@@ -119,6 +209,17 @@
                               FunctionLiteral* lit,
                               bool is_toplevel,
                               Handle<Script> script);
+
+ private:
+
+#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
+  static void LogCodeCreateEvent(Logger::LogEventsAndTags tag,
+                                 Handle<String> name,
+                                 Handle<String> inferred_name,
+                                 int start_position,
+                                 Handle<Script> script,
+                                 Handle<Code> code);
+#endif
 };
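
For illustration, the three ways a CompilationInfo is now put together and how the
derived accessors recover the rest; the handles (literal, script, shared, closure,
receiver) are placeholders for values a caller already has:

    CompilationInfo eager(literal, script, false);      // eager compile, not eval
    CompilationInfo lazy_shared(shared);                // lazy, via SharedFunctionInfo
    CompilationInfo lazy_closure(closure, 0, receiver); // lazy, via JSFunction

    // Whichever was supplied, script() and shared_info() are derived on demand:
    Handle<Script> s = lazy_closure.script();           // via closure()->shared()
    Handle<SharedFunctionInfo> si = lazy_closure.shared_info();
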
 
 
diff --git a/src/contexts.h b/src/contexts.h
index 66c1575..3feb6ce 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -94,7 +94,6 @@
   V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \
   V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \
     call_as_constructor_delegate) \
-  V(EMPTY_SCRIPT_INDEX, Script, empty_script) \
   V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
   V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
   V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \
@@ -215,7 +214,6 @@
     RUNTIME_CONTEXT_INDEX,
     CALL_AS_FUNCTION_DELEGATE_INDEX,
     CALL_AS_CONSTRUCTOR_DELEGATE_INDEX,
-    EMPTY_SCRIPT_INDEX,
     SCRIPT_FUNCTION_INDEX,
     CONTEXT_EXTENSION_FUNCTION_INDEX,
     OUT_OF_MEMORY_INDEX,
diff --git a/src/data-flow.cc b/src/data-flow.cc
index 0e30b31..22ec66f 100644
--- a/src/data-flow.cc
+++ b/src/data-flow.cc
@@ -33,8 +33,9 @@
 namespace internal {
 
 
-void AstLabeler::Label(FunctionLiteral* fun) {
-  VisitStatements(fun->body());
+void AstLabeler::Label(CompilationInfo* info) {
+  info_ = info;
+  VisitStatements(info_->function()->body());
 }
 
 
@@ -162,6 +163,10 @@
 
 void AstLabeler::VisitVariableProxy(VariableProxy* expr) {
   expr->set_num(next_number_++);
+  Variable* var = expr->var();
+  if (var->is_global() && !var->is_this()) {
+    info_->set_has_globals(true);
+  }
 }
 
 
@@ -198,7 +203,7 @@
     ASSERT(prop->key()->IsPropertyName());
     VariableProxy* proxy = prop->obj()->AsVariableProxy();
     if (proxy != NULL && proxy->var()->is_this()) {
-      has_this_properties_ = true;
+      info()->set_has_this_properties(true);
     } else {
       Visit(prop->obj());
     }
diff --git a/src/data-flow.h b/src/data-flow.h
index ac83503..7c16d5d 100644
--- a/src/data-flow.h
+++ b/src/data-flow.h
@@ -29,7 +29,7 @@
 #define V8_DATAFLOW_H_
 
 #include "ast.h"
-#include "scopes.h"
+#include "compiler.h"
 
 namespace v8 {
 namespace internal {
@@ -38,13 +38,13 @@
 // their evaluation order (post-order left-to-right traversal).
 class AstLabeler: public AstVisitor {
  public:
-  AstLabeler() : next_number_(0), has_this_properties_(false) {}
+  AstLabeler() : next_number_(0) {}
 
-  void Label(FunctionLiteral* fun);
-
-  bool has_this_properties() { return has_this_properties_; }
+  void Label(CompilationInfo* info);
 
  private:
+  CompilationInfo* info() { return info_; }
+
   void VisitDeclarations(ZoneList<Declaration*>* decls);
   void VisitStatements(ZoneList<Statement*>* stmts);
 
@@ -56,7 +56,7 @@
   // Traversal number for labelling AST nodes.
   int next_number_;
 
-  bool has_this_properties_;
+  CompilationInfo* info_;
 
   DISALLOW_COPY_AND_ASSIGN(AstLabeler);
 };
diff --git a/src/date.js b/src/date.js
new file mode 100644
index 0000000..2ffd006
--- /dev/null
+++ b/src/date.js
@@ -0,0 +1,1139 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// This file relies on the fact that the following declarations have been made
+// in v8natives.js:
+// const $isFinite = GlobalIsFinite;
+
+// -------------------------------------------------------------------
+
+// This file contains date support implemented in JavaScript.
+
+
+// Keep reference to original values of some global properties.  This
+// has the added benefit that the code in this file is isolated from
+// changes to these properties.
+const $Date = global.Date;
+
+// Helper function to throw error.
+function ThrowDateTypeError() {
+  throw new $TypeError('this is not a Date object.');
+}
+
+// ECMA 262 - 5.2
+function Modulo(value, remainder) {
+  var mod = value % remainder;
+  // Guard against returning -0.
+  if (mod == 0) return 0;
+  return mod >= 0 ? mod : mod + remainder;
+}
+
+
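
A quick standalone illustration of why the guard above is needed (plain JavaScript, assuming the Modulo defined in the patch; not part of the patch itself). The % operator keeps the dividend's sign, so both cases below would otherwise leak a negative or a -0 result:

    Modulo(-1, 7);        // 6         (plain -1 % 7 evaluates to -1)
    1 / Modulo(-7, 7);    // Infinity  (plain -7 % 7 evaluates to -0, and 1 / -0 is -Infinity)
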
+function TimeWithinDay(time) {
+  return Modulo(time, msPerDay);
+}
+
+
+// ECMA 262 - 15.9.1.3
+function DaysInYear(year) {
+  if (year % 4 != 0) return 365;
+  if ((year % 100 == 0) && (year % 400 != 0)) return 365;
+  return 366;
+}
+
+
+function DayFromYear(year) {
+  return 365 * (year-1970)
+      + FLOOR((year-1969)/4)
+      - FLOOR((year-1901)/100)
+      + FLOOR((year-1601)/400);
+}
+
+
+function TimeFromYear(year) {
+  return msPerDay * DayFromYear(year);
+}
+
+
+function InLeapYear(time) {
+  return DaysInYear(YEAR_FROM_TIME(time)) == 366 ? 1 : 0;
+}
+
+
+function DayWithinYear(time) {
+  return DAY(time) - DayFromYear(YEAR_FROM_TIME(time));
+}
+
+
+// ECMA 262 - 15.9.1.9
+function EquivalentYear(year) {
+  // Returns an equivalent year in the range [2008-2035] matching
+  // - leap year.
+  // - week day of first day.
+  var time = TimeFromYear(year);
+  var recent_year = (InLeapYear(time) == 0 ? 1967 : 1956) +
+      (WeekDay(time) * 12) % 28;
+  // Find the year in the range 2008..2035 that is equivalent mod 28.
+  // Add 3*28 to give a positive argument to the modulus operator.
+  return 2008 + (recent_year + 3*28 - 2008) % 28;
+}
+
+
+function EquivalentTime(t) {
+  // The issue here is that some library calls don't work right for dates
+  // that cannot be represented using a non-negative signed 32 bit integer
+  // (measured in whole seconds based on the 1970 epoch).
+  // We solve this by mapping the time to a year with the same leap-year-ness
+  // and the same starting weekday for the year.  The ECMAScript specification
+  // says we must do this, but for compatibility with other browsers, we use
+  // the actual year if it is in the range 1970..2037.
+  if (t >= 0 && t <= 2.1e12) return t;
+  var day = MakeDay(EquivalentYear(YEAR_FROM_TIME(t)), MONTH_FROM_TIME(t), DATE_FROM_TIME(t));
+  return TimeClip(MakeDate(day, TimeWithinDay(t)));
+}
+
+
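
Between 1901 and 2099 the Gregorian calendar repeats on a 28-year cycle (same leap-year pattern, same weekdays), which is what EquivalentYear exploits. A standalone spot check of that assumption using the host Date object rather than V8 internals; the 1970..2070 range stays clear of the 2100 exception:

    function isLeap(y) { return (y % 4 == 0 && y % 100 != 0) || y % 400 == 0; }
    for (var y = 1970; y <= 2070; y++) {
      var sameLeap = isLeap(y) == isLeap(y + 28);
      var sameWeekday = new Date(Date.UTC(y, 0, 1)).getUTCDay() ==
                        new Date(Date.UTC(y + 28, 0, 1)).getUTCDay();
      if (!sameLeap || !sameWeekday) throw new Error("28-year cycle broken at " + y);
    }
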
+// Because computing the DST offset is a pretty expensive operation
+// we keep a cache of last computed offset along with a time interval
+// where we know the cache is valid.
+var DST_offset_cache = {
+  // Cached DST offset.
+  offset: 0,
+  // Time interval where the cached offset is valid.
+  start: 0, end: -1,
+  // Size of next interval expansion.
+  increment: 0
+};
+
+
+// NOTE: The implementation relies on the fact that no time zones have
+// more than one daylight savings offset change per month.
+// If this function is called with NaN it returns NaN.
+function DaylightSavingsOffset(t) {
+  // Load the cache object from the builtins object.
+  var cache = DST_offset_cache;
+
+  // Cache the start and the end in local variables for fast access.
+  var start = cache.start;
+  var end = cache.end;
+
+  if (start <= t) {
+    // If the time fits in the cached interval, return the cached offset.
+    if (t <= end) return cache.offset;
+
+    // Compute a possible new interval end.
+    var new_end = end + cache.increment;
+
+    if (t <= new_end) {
+      var end_offset = %DateDaylightSavingsOffset(EquivalentTime(new_end));
+      if (cache.offset == end_offset) {
+        // If the offset at the end of the new interval still matches
+        // the offset in the cache, we grow the cached time interval
+        // and return the offset.
+        cache.end = new_end;
+        cache.increment = msPerMonth;
+        return end_offset;
+      } else {
+        var offset = %DateDaylightSavingsOffset(EquivalentTime(t));
+        if (offset == end_offset) {
+          // The offset at the given time is equal to the offset at the
+          // new end of the interval, which means we've just skipped the
+          // point in time where the DST offset change occurred. Update
+          // the interval to reflect this and reset the increment.
+          cache.start = t;
+          cache.end = new_end;
+          cache.increment = msPerMonth;
+        } else {
+          // The interval contains a DST offset change and the given time is
+          // before it. Adjust the increment to avoid a linear search for
+          // the offset change point and change the end of the interval.
+          cache.increment /= 3;
+          cache.end = t;
+        }
+        // Update the offset in the cache and return it.
+        cache.offset = offset;
+        return offset;
+      }
+    }
+  }
+
+  // Compute the DST offset for the time and shrink the cache interval
+  // to only contain the time. This allows fast repeated DST offset
+  // computations for the same time.
+  var offset = %DateDaylightSavingsOffset(EquivalentTime(t));
+  cache.offset = offset;
+  cache.start = cache.end = t;
+  cache.increment = msPerMonth;
+  return offset;
+}
+
+
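
The cache above remembers one time interval over which the DST offset is known to be constant, grows it while new probes agree, and narrows in on the change point when they do not. A simplified standalone sketch of just the grow-or-restart part, with a hypothetical expensiveOffset standing in for %DateDaylightSavingsOffset:

    function makeIntervalCache(expensiveOffset, step) {
      var cache = { offset: 0, start: 0, end: -1 };
      return function(t) {
        if (cache.start <= t && t <= cache.end) return cache.offset;  // cache hit
        var offset = expensiveOffset(t);
        if (offset == cache.offset && cache.end < t && t <= cache.end + step) {
          cache.end = t;            // same offset just past the end: grow the interval
        } else {
          cache.offset = offset;    // otherwise restart a fresh one-point interval
          cache.start = cache.end = t;
        }
        return offset;
      };
    }
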
+var timezone_cache_time = $NaN;
+var timezone_cache_timezone;
+
+function LocalTimezone(t) {
+  if (NUMBER_IS_NAN(t)) return "";
+  if (t == timezone_cache_time) {
+    return timezone_cache_timezone;
+  }
+  var timezone = %DateLocalTimezone(EquivalentTime(t));
+  timezone_cache_time = t;
+  timezone_cache_timezone = timezone;
+  return timezone;
+}
+
+
+function WeekDay(time) {
+  return Modulo(DAY(time) + 4, 7);
+}
+
+var local_time_offset = %DateLocalTimeOffset();
+
+function LocalTime(time) {
+  if (NUMBER_IS_NAN(time)) return time;
+  return time + local_time_offset + DaylightSavingsOffset(time);
+}
+
+function LocalTimeNoCheck(time) {
+  // Inline the DST offset cache checks for speed.
+  var cache = DST_offset_cache;
+  if (cache.start <= time && time <= cache.end) {
+    var dst_offset = cache.offset;
+  } else {
+    var dst_offset = DaylightSavingsOffset(time);
+  }
+  return time + local_time_offset + dst_offset;
+}
+
+
+function UTC(time) {
+  if (NUMBER_IS_NAN(time)) return time;
+  var tmp = time - local_time_offset;
+  return tmp - DaylightSavingsOffset(tmp);
+}
+
+
+// ECMA 262 - 15.9.1.11
+function MakeTime(hour, min, sec, ms) {
+  if (!$isFinite(hour)) return $NaN;
+  if (!$isFinite(min)) return $NaN;
+  if (!$isFinite(sec)) return $NaN;
+  if (!$isFinite(ms)) return $NaN;
+  return TO_INTEGER(hour) * msPerHour
+      + TO_INTEGER(min) * msPerMinute
+      + TO_INTEGER(sec) * msPerSecond
+      + TO_INTEGER(ms);
+}
+
+
+// ECMA 262 - 15.9.1.12
+function TimeInYear(year) {
+  return DaysInYear(year) * msPerDay;
+}
+
+
+// Compute modified Julian day from year, month, date.
+function ToJulianDay(year, month, date) {
+  var jy = (month > 1) ? year : year - 1;
+  var jm = (month > 1) ? month + 2 : month + 14;
+  var ja = FLOOR(jy / 100);
+  return FLOOR(FLOOR(365.25*jy) + FLOOR(30.6001*jm) + date + 1720995) + 2 - ja + FLOOR(0.25*ja);
+}
+
+var four_year_cycle_table = CalculateDateTable();
+
+
+function CalculateDateTable() {
+  var month_lengths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31];
+  var four_year_cycle_table = new $Array(1461);
+
+  var cumulative = 0;
+  var position = 0;
+  var leap_position = 0;
+  for (var month = 0; month < 12; month++) {
+    var month_bits = month << kMonthShift;
+    var length = month_lengths[month];
+    for (var day = 1; day <= length; day++) {
+      four_year_cycle_table[leap_position] =
+        month_bits + day;
+      four_year_cycle_table[366 + position] =
+        (1 << kYearShift) + month_bits + day;
+      four_year_cycle_table[731 + position] =
+        (2 << kYearShift) + month_bits + day;
+      four_year_cycle_table[1096 + position] =
+        (3 << kYearShift) + month_bits + day;
+      leap_position++;
+      position++;
+    }
+    if (month == 1) {
+      four_year_cycle_table[leap_position++] = month_bits + 29;
+    }
+  }
+  return four_year_cycle_table;
+}
+
+
+// Constructor for creating objects holding year, month, and date.
+// Introduced to ensure the two return points in FromJulianDay match the same map.
+function DayTriplet(year, month, date) {
+  this.year = year;
+  this.month = month;
+  this.date = date;
+}
+
+var julian_day_cache_triplet;
+var julian_day_cache_day = $NaN;
+
+// Compute year, month, and day from modified Julian day.
+// The missing days in 1582 are ignored for JavaScript compatibility.
+function FromJulianDay(julian) {
+  if (julian_day_cache_day == julian) {
+    return julian_day_cache_triplet;
+  }
+  var result;
+  // Avoid floating point and non-Smi maths in the common case.  This is also a
+  // period of time where leap years are very regular.  The range is kept small
+  // enough to avoid overflow when doing the multiply-to-divide trick.
+  if (julian > kDayZeroInJulianDay &&
+      (julian - kDayZeroInJulianDay) < 40177) { // 1970 - 2080
+    var jsimple = (julian - kDayZeroInJulianDay) + 731; // Day 0 is 1st January 1968
+    var y = 1968;
+    // Divide by 1461 by multiplying with 22967 and shifting down by 25!
+    var after_1968 = (jsimple * 22967) >> 25;
+    y += after_1968 << 2;
+    jsimple -= 1461 * after_1968;
+    var four_year_cycle = four_year_cycle_table[jsimple];
+    result = new DayTriplet(y + (four_year_cycle >> kYearShift),
+                            (four_year_cycle & kMonthMask) >> kMonthShift,
+                            four_year_cycle & kDayMask);
+  } else {
+    var jalpha = FLOOR((julian - 1867216.25) / 36524.25);
+    var jb = julian + 1 + jalpha - FLOOR(0.25 * jalpha) + 1524;
+    var jc = FLOOR(6680.0 + ((jb-2439870) - 122.1)/365.25);
+    var jd = FLOOR(365 * jc + (0.25 * jc));
+    var je = FLOOR((jb - jd)/30.6001);
+    var m = je - 1;
+    if (m > 12) m -= 13;
+    var y = jc - 4715;
+    if (m > 2) { --y; --m; }
+    var d = jb - jd - FLOOR(30.6001 * je);
+    result = new DayTriplet(y, m, d);
+  }
+  julian_day_cache_day = julian;
+  julian_day_cache_triplet = result;
+  return result;
+}
+
+
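
The fast path above divides by 1461 (the number of days in a four-year cycle) with an integer multiply and shift. A standalone check that the trick is exact over the day range the fast path accepts (plain JavaScript, not part of the patch):

    // (j * 22967) >> 25 approximates j / 1461 from above by roughly 7e-9 per unit,
    // which is never enough to cross an integer boundary for j below ~41000.
    for (var j = 0; j < 40177 + 731; j++) {
      if (((j * 22967) >> 25) != Math.floor(j / 1461)) {
        throw new Error("multiply-to-divide trick fails at " + j);
      }
    }
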
+// Compute number of days given a year, month, date.
+// Note that month and date can lie outside the normal range.
+//   For example:
+//     MakeDay(2007, -4, 20) --> MakeDay(2006, 8, 20)
+//     MakeDay(2007, -33, 1) --> MakeDay(2004, 3, 1)
+//     MakeDay(2007, 14, -50) --> MakeDay(2007, 8, 11)
+function MakeDay(year, month, date) {
+  if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return $NaN;
+
+  // Conversion to integers.
+  year = TO_INTEGER(year);
+  month = TO_INTEGER(month);
+  date = TO_INTEGER(date);
+
+  // Overflow months into year.
+  year = year + FLOOR(month/12);
+  month = month % 12;
+  if (month < 0) {
+    month += 12;
+  }
+
+  // Return days relative to Jan 1 1970.
+  return ToJulianDay(year, month, date) - kDayZeroInJulianDay;
+}
+
+
+// ECMA 262 - 15.9.1.13
+function MakeDate(day, time) {
+  if (!$isFinite(day)) return $NaN;
+  if (!$isFinite(time)) return $NaN;
+  return day * msPerDay + time;
+}
+
+
+// ECMA 262 - 15.9.1.14
+function TimeClip(time) {
+  if (!$isFinite(time)) return $NaN;
+  if ($abs(time) > 8.64E15) return $NaN;
+  return TO_INTEGER(time);
+}
+
+
+// The Date cache is used to limit the cost of parsing the same Date
+// strings over and over again.
+var Date_cache = {
+  // Cached time value.
+  time: $NaN,
+  // Cached year when interpreting the time as a local time. Only
+  // valid when the time matches the cached time.
+  year: $NaN,
+  // String input for which the cached time is valid.
+  string: null
+};
+
+
+%SetCode($Date, function(year, month, date, hours, minutes, seconds, ms) {
+  if (!%_IsConstructCall()) {
+    // ECMA 262 - 15.9.2
+    return (new $Date()).toString();
+  }
+
+  // ECMA 262 - 15.9.3
+  var argc = %_ArgumentsLength();
+  var value;
+  if (argc == 0) {
+    value = %DateCurrentTime();
+
+  } else if (argc == 1) {
+    if (IS_NUMBER(year)) {
+      value = TimeClip(year);
+
+    } else if (IS_STRING(year)) {
+      // Probe the Date cache. If we already have a time value for the
+      // given string, we re-use that instead of parsing it again.
+      var cache = Date_cache;
+      if (cache.string === year) {
+        value = cache.time;
+      } else {
+        value = DateParse(year);
+        if (!NUMBER_IS_NAN(value)) {
+          cache.time = value;
+          cache.year = YEAR_FROM_TIME(LocalTimeNoCheck(value));
+          cache.string = year;
+        }
+      }
+
+    } else {
+      // According to ECMA 262, no hint should be given for this
+      // conversion. However, ToPrimitive defaults to STRING_HINT for
+      // Date objects which will lose precision when the Date
+      // constructor is called with another Date object as its
+      // argument. We therefore use NUMBER_HINT for the conversion,
+      // which is the default for everything other than Date objects.
+      // This makes us behave like KJS and SpiderMonkey.
+      var time = ToPrimitive(year, NUMBER_HINT);
+      value = IS_STRING(time) ? DateParse(time) : TimeClip(ToNumber(time));
+    }
+
+  } else {
+    year = ToNumber(year);
+    month = ToNumber(month);
+    date = argc > 2 ? ToNumber(date) : 1;
+    hours = argc > 3 ? ToNumber(hours) : 0;
+    minutes = argc > 4 ? ToNumber(minutes) : 0;
+    seconds = argc > 5 ? ToNumber(seconds) : 0;
+    ms = argc > 6 ? ToNumber(ms) : 0;
+    year = (!NUMBER_IS_NAN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
+        ? 1900 + TO_INTEGER(year) : year;
+    var day = MakeDay(year, month, date);
+    var time = MakeTime(hours, minutes, seconds, ms);
+    value = TimeClip(UTC(MakeDate(day, time)));
+  }
+  %_SetValueOf(this, value);
+});
+
+
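
The NUMBER_HINT choice above is observable from script: copying a Date through the constructor keeps full millisecond precision, while round-tripping through its string form does not. A standalone illustration in plain JavaScript, matching the behaviour the comment describes:

    var original = new Date(123456789);         // time value ends in ...789 ms
    var copied   = new Date(original);          // converted with a number hint
    var reparsed = new Date(String(original));  // string form drops the milliseconds

    copied.getTime() == original.getTime();     // true
    reparsed.getTime() == original.getTime();   // false (rounded to whole seconds)
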
+// Helper functions.
+function GetTimeFrom(aDate) {
+  return DATE_VALUE(aDate);
+}
+
+function GetMillisecondsFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return MS_FROM_TIME(LocalTimeNoCheck(t));
+}
+
+
+function GetUTCMillisecondsFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return MS_FROM_TIME(t);
+}
+
+
+function GetSecondsFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return SEC_FROM_TIME(LocalTimeNoCheck(t));
+}
+
+
+function GetUTCSecondsFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return SEC_FROM_TIME(t);
+}
+
+
+function GetMinutesFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return MIN_FROM_TIME(LocalTimeNoCheck(t));
+}
+
+
+function GetUTCMinutesFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return MIN_FROM_TIME(t);
+}
+
+
+function GetHoursFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return HOUR_FROM_TIME(LocalTimeNoCheck(t));
+}
+
+
+function GetUTCHoursFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return HOUR_FROM_TIME(t);
+}
+
+
+function GetFullYearFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  var cache = Date_cache;
+  if (cache.time === t) return cache.year;
+  return YEAR_FROM_TIME(LocalTimeNoCheck(t));
+}
+
+
+function GetUTCFullYearFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return YEAR_FROM_TIME(t);
+}
+
+
+function GetMonthFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return MONTH_FROM_TIME(LocalTimeNoCheck(t));
+}
+
+
+function GetUTCMonthFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return MONTH_FROM_TIME(t);
+}
+
+
+function GetDateFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return DATE_FROM_TIME(LocalTimeNoCheck(t));
+}
+
+
+function GetUTCDateFrom(aDate) {
+  var t = DATE_VALUE(aDate);
+  if (NUMBER_IS_NAN(t)) return t;
+  return DATE_FROM_TIME(t);
+}
+
+
+%FunctionSetPrototype($Date, new $Date($NaN));
+
+
+var WeekDays = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'];
+var Months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'];
+
+
+function TwoDigitString(value) {
+  return value < 10 ? "0" + value : "" + value;
+}
+
+
+function DateString(time) {
+  var YMD = FromJulianDay(DAY(time) + kDayZeroInJulianDay);
+  return WeekDays[WeekDay(time)] + ' '
+      + Months[YMD.month] + ' '
+      + TwoDigitString(YMD.date) + ' '
+      + YMD.year;
+}
+
+
+var LongWeekDays = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'];
+var LongMonths = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'];
+
+
+function LongDateString(time) {
+  var YMD = FromJulianDay(DAY(time) + kDayZeroInJulianDay);
+  return LongWeekDays[WeekDay(time)] + ', '
+      + LongMonths[YMD.month] + ' '
+      + TwoDigitString(YMD.date) + ', '
+      + YMD.year;
+}
+
+
+function TimeString(time) {
+  return TwoDigitString(HOUR_FROM_TIME(time)) + ':'
+      + TwoDigitString(MIN_FROM_TIME(time)) + ':'
+      + TwoDigitString(SEC_FROM_TIME(time));
+}
+
+
+function LocalTimezoneString(time) {
+  var timezoneOffset =
+      (local_time_offset + DaylightSavingsOffset(time)) / msPerMinute;
+  var sign = (timezoneOffset >= 0) ? 1 : -1;
+  var hours = FLOOR((sign * timezoneOffset)/60);
+  var min   = FLOOR((sign * timezoneOffset)%60);
+  var gmt = ' GMT' + ((sign == 1) ? '+' : '-') +
+      TwoDigitString(hours) + TwoDigitString(min);
+  return gmt + ' (' +  LocalTimezone(time) + ')';
+}
+
+
+function DatePrintString(time) {
+  return DateString(time) + ' ' + TimeString(time);
+}
+
+// -------------------------------------------------------------------
+
+// Reused output buffer. Used when parsing date strings.
+var parse_buffer = $Array(7);
+
+// ECMA 262 - 15.9.4.2
+function DateParse(string) {
+  var arr = %DateParseString(ToString(string), parse_buffer);
+  if (IS_NULL(arr)) return $NaN;
+
+  var day = MakeDay(arr[0], arr[1], arr[2]);
+  var time = MakeTime(arr[3], arr[4], arr[5], 0);
+  var date = MakeDate(day, time);
+
+  if (IS_NULL(arr[6])) {
+    return TimeClip(UTC(date));
+  } else {
+    return TimeClip(date - arr[6] * 1000);
+  }
+}
+
+
+// ECMA 262 - 15.9.4.3
+function DateUTC(year, month, date, hours, minutes, seconds, ms) {
+  year = ToNumber(year);
+  month = ToNumber(month);
+  var argc = %_ArgumentsLength();
+  date = argc > 2 ? ToNumber(date) : 1;
+  hours = argc > 3 ? ToNumber(hours) : 0;
+  minutes = argc > 4 ? ToNumber(minutes) : 0;
+  seconds = argc > 5 ? ToNumber(seconds) : 0;
+  ms = argc > 6 ? ToNumber(ms) : 0;
+  year = (!NUMBER_IS_NAN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
+      ? 1900 + TO_INTEGER(year) : year;
+  var day = MakeDay(year, month, date);
+  var time = MakeTime(hours, minutes, seconds, ms);
+  return %_SetValueOf(this, TimeClip(MakeDate(day, time)));
+}
+
+
+// Mozilla-specific extension. Returns the number of milliseconds
+// elapsed since 1 January 1970 00:00:00 UTC.
+function DateNow() {
+  return %DateCurrentTime();
+}
+
+
+// ECMA 262 - 15.9.5.2
+function DateToString() {
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return kInvalidDate;
+  return DatePrintString(LocalTimeNoCheck(t)) + LocalTimezoneString(t);
+}
+
+
+// ECMA 262 - 15.9.5.3
+function DateToDateString() {
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return kInvalidDate;
+  return DateString(LocalTimeNoCheck(t));
+}
+
+
+// ECMA 262 - 15.9.5.4
+function DateToTimeString() {
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return kInvalidDate;
+  var lt = LocalTimeNoCheck(t);
+  return TimeString(lt) + LocalTimezoneString(lt);
+}
+
+
+// ECMA 262 - 15.9.5.5
+function DateToLocaleString() {
+  return DateToString.call(this);
+}
+
+
+// ECMA 262 - 15.9.5.6
+function DateToLocaleDateString() {
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return kInvalidDate;
+  return LongDateString(LocalTimeNoCheck(t));
+}
+
+
+// ECMA 262 - 15.9.5.7
+function DateToLocaleTimeString() {
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return kInvalidDate;
+  var lt = LocalTimeNoCheck(t);
+  return TimeString(lt);
+}
+
+
+// ECMA 262 - 15.9.5.8
+function DateValueOf() {
+  return DATE_VALUE(this);
+}
+
+
+// ECMA 262 - 15.9.5.9
+function DateGetTime(logMarker) {
+  if (logMarker) %ProfileLogMarker(logMarker);
+  return DATE_VALUE(this);
+}
+
+
+// ECMA 262 - 15.9.5.10
+function DateGetFullYear() {
+  return GetFullYearFrom(this)
+}
+
+
+// ECMA 262 - 15.9.5.11
+function DateGetUTCFullYear() {
+  return GetUTCFullYearFrom(this)
+}
+
+
+// ECMA 262 - 15.9.5.12
+function DateGetMonth() {
+  return GetMonthFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.13
+function DateGetUTCMonth() {
+  return GetUTCMonthFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.14
+function DateGetDate() {
+  return GetDateFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.15
+function DateGetUTCDate() {
+  return GetUTCDateFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.16
+function DateGetDay() {
+  var t = %_ValueOf(this);
+  if (NUMBER_IS_NAN(t)) return t;
+  return WeekDay(LocalTimeNoCheck(t));
+}
+
+
+// ECMA 262 - 15.9.5.17
+function DateGetUTCDay() {
+  var t = %_ValueOf(this);
+  if (NUMBER_IS_NAN(t)) return t;
+  return WeekDay(t);
+}
+
+
+// ECMA 262 - 15.9.5.18
+function DateGetHours() {
+  return GetHoursFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.19
+function DateGetUTCHours() {
+  return GetUTCHoursFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.20
+function DateGetMinutes() {
+  return GetMinutesFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.21
+function DateGetUTCMinutes() {
+  return GetUTCMinutesFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.22
+function DateGetSeconds() {
+  return GetSecondsFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.23
+function DateGetUTCSeconds() {
+  return GetUTCSecondsFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.24
+function DateGetMilliseconds() {
+  return GetMillisecondsFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.25
+function DateGetUTCMilliseconds() {
+  return GetUTCMillisecondsFrom(this);
+}
+
+
+// ECMA 262 - 15.9.5.26
+function DateGetTimezoneOffset() {
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return t;
+  return (t - LocalTimeNoCheck(t)) / msPerMinute;
+}
+
+
+// ECMA 262 - 15.9.5.27
+function DateSetTime(ms) {
+  if (!IS_DATE(this)) ThrowDateTypeError();
+  return %_SetValueOf(this, TimeClip(ToNumber(ms)));
+}
+
+
+// ECMA 262 - 15.9.5.28
+function DateSetMilliseconds(ms) {
+  var t = LocalTime(DATE_VALUE(this));
+  ms = ToNumber(ms);
+  var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), SEC_FROM_TIME(t), ms);
+  return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
+}
+
+
+// ECMA 262 - 15.9.5.29
+function DateSetUTCMilliseconds(ms) {
+  var t = DATE_VALUE(this);
+  ms = ToNumber(ms);
+  var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), SEC_FROM_TIME(t), ms);
+  return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
+}
+
+
+// ECMA 262 - 15.9.5.30
+function DateSetSeconds(sec, ms) {
+  var t = LocalTime(DATE_VALUE(this));
+  sec = ToNumber(sec);
+  ms = %_ArgumentsLength() < 2 ? GetMillisecondsFrom(this) : ToNumber(ms);
+  var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), sec, ms);
+  return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
+}
+
+
+// ECMA 262 - 15.9.5.31
+function DateSetUTCSeconds(sec, ms) {
+  var t = DATE_VALUE(this);
+  sec = ToNumber(sec);
+  ms = %_ArgumentsLength() < 2 ? GetUTCMillisecondsFrom(this) : ToNumber(ms);
+  var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), sec, ms);
+  return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
+}
+
+
+// ECMA 262 - 15.9.5.33
+function DateSetMinutes(min, sec, ms) {
+  var t = LocalTime(DATE_VALUE(this));
+  min = ToNumber(min);
+  var argc = %_ArgumentsLength();
+  sec = argc < 2 ? GetSecondsFrom(this) : ToNumber(sec);
+  ms = argc < 3 ? GetMillisecondsFrom(this) : ToNumber(ms);
+  var time = MakeTime(HOUR_FROM_TIME(t), min, sec, ms);
+  return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
+}
+
+
+// ECMA 262 - 15.9.5.34
+function DateSetUTCMinutes(min, sec, ms) {
+  var t = DATE_VALUE(this);
+  min = ToNumber(min);
+  var argc = %_ArgumentsLength();
+  sec = argc < 2 ? GetUTCSecondsFrom(this) : ToNumber(sec);
+  ms = argc < 3 ? GetUTCMillisecondsFrom(this) : ToNumber(ms);
+  var time = MakeTime(HOUR_FROM_TIME(t), min, sec, ms);
+  return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
+}
+
+
+// ECMA 262 - 15.9.5.35
+function DateSetHours(hour, min, sec, ms) {
+  var t = LocalTime(DATE_VALUE(this));
+  hour = ToNumber(hour);
+  var argc = %_ArgumentsLength();
+  min = argc < 2 ? GetMinutesFrom(this) : ToNumber(min);
+  sec = argc < 3 ? GetSecondsFrom(this) : ToNumber(sec);
+  ms = argc < 4 ? GetMillisecondsFrom(this) : ToNumber(ms);
+  var time = MakeTime(hour, min, sec, ms);
+  return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
+}
+
+
+// ECMA 262 - 15.9.5.34
+function DateSetUTCHours(hour, min, sec, ms) {
+  var t = DATE_VALUE(this);
+  hour = ToNumber(hour);
+  var argc = %_ArgumentsLength();
+  min = argc < 2 ? GetUTCMinutesFrom(this) : ToNumber(min);
+  sec = argc < 3 ? GetUTCSecondsFrom(this) : ToNumber(sec);
+  ms = argc < 4 ? GetUTCMillisecondsFrom(this) : ToNumber(ms);
+  var time = MakeTime(hour, min, sec, ms);
+  return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
+}
+
+
+// ECMA 262 - 15.9.5.36
+function DateSetDate(date) {
+  var t = LocalTime(DATE_VALUE(this));
+  date = ToNumber(date);
+  var day = MakeDay(YEAR_FROM_TIME(t), MONTH_FROM_TIME(t), date);
+  return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
+}
+
+
+// ECMA 262 - 15.9.5.37
+function DateSetUTCDate(date) {
+  var t = DATE_VALUE(this);
+  date = ToNumber(date);
+  var day = MakeDay(YEAR_FROM_TIME(t), MONTH_FROM_TIME(t), date);
+  return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
+}
+
+
+// ECMA 262 - 15.9.5.38
+function DateSetMonth(month, date) {
+  var t = LocalTime(DATE_VALUE(this));
+  month = ToNumber(month);
+  date = %_ArgumentsLength() < 2 ? GetDateFrom(this) : ToNumber(date);
+  var day = MakeDay(YEAR_FROM_TIME(t), month, date);
+  return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
+}
+
+
+// ECMA 262 - 15.9.5.39
+function DateSetUTCMonth(month, date) {
+  var t = DATE_VALUE(this);
+  month = ToNumber(month);
+  date = %_ArgumentsLength() < 2 ? GetUTCDateFrom(this) : ToNumber(date);
+  var day = MakeDay(YEAR_FROM_TIME(t), month, date);
+  return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
+}
+
+
+// ECMA 262 - 15.9.5.40
+function DateSetFullYear(year, month, date) {
+  var t = DATE_VALUE(this);
+  t = NUMBER_IS_NAN(t) ? 0 : LocalTimeNoCheck(t);
+  year = ToNumber(year);
+  var argc = %_ArgumentsLength();
+  month = argc < 2 ? MONTH_FROM_TIME(t) : ToNumber(month);
+  date = argc < 3 ? DATE_FROM_TIME(t) : ToNumber(date);
+  var day = MakeDay(year, month, date);
+  return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
+}
+
+
+// ECMA 262 - 15.9.5.41
+function DateSetUTCFullYear(year, month, date) {
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) t = 0;
+  var argc = %_ArgumentsLength();
+  year = ToNumber(year);
+  month = argc < 2 ? MONTH_FROM_TIME(t) : ToNumber(month);
+  date = argc < 3 ? DATE_FROM_TIME(t) : ToNumber(date);
+  var day = MakeDay(year, month, date);
+  return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
+}
+
+
+// ECMA 262 - 15.9.5.42
+function DateToUTCString() {
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return kInvalidDate;
+  // Return UTC string of the form: Sat, 31 Jan 1970 23:00:00 GMT
+  return WeekDays[WeekDay(t)] + ', '
+      + TwoDigitString(DATE_FROM_TIME(t)) + ' '
+      + Months[MONTH_FROM_TIME(t)] + ' '
+      + YEAR_FROM_TIME(t) + ' '
+      + TimeString(t) + ' GMT';
+}
+
+
+// ECMA 262 - B.2.4
+function DateGetYear() {
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return $NaN;
+  return YEAR_FROM_TIME(LocalTimeNoCheck(t)) - 1900;
+}
+
+
+// ECMA 262 - B.2.5
+function DateSetYear(year) {
+  var t = LocalTime(DATE_VALUE(this));
+  if (NUMBER_IS_NAN(t)) t = 0;
+  year = ToNumber(year);
+  if (NUMBER_IS_NAN(year)) return %_SetValueOf(this, $NaN);
+  year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
+      ? 1900 + TO_INTEGER(year) : year;
+  var day = MakeDay(year, MONTH_FROM_TIME(t), DATE_FROM_TIME(t));
+  return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
+}
+
+
+// ECMA 262 - B.2.6
+//
+// Notice that this does not follow ECMA 262 completely.  ECMA 262
+// says that toGMTString should be the same Function object as
+// toUTCString.  JSC does not do this, so for compatibility we do not
+// do that either.  Instead, we create a new function whose name
+// property will return toGMTString.
+function DateToGMTString() {
+  return DateToUTCString.call(this);
+}
+
+
+function PadInt(n, digits) {
+  if (digits == 1) return n;
+  return n < MathPow(10, digits - 1) ? '0' + PadInt(n, digits - 1) : n;
+}
+
+
+function DateToISOString() {
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return kInvalidDate;
+  return this.getUTCFullYear() + '-' + PadInt(this.getUTCMonth() + 1, 2) +
+      '-' + PadInt(this.getUTCDate(), 2) + 'T' + PadInt(this.getUTCHours(), 2) +
+      ':' + PadInt(this.getUTCMinutes(), 2) + ':' + PadInt(this.getUTCSeconds(), 2) +
+      '.' + PadInt(this.getUTCMilliseconds(), 3) +
+      'Z';
+}
+
+
+function DateToJSON(key) {
+  return CheckJSONPrimitive(this.toISOString());
+}
+
+
+// -------------------------------------------------------------------
+
+function SetupDate() {
+  // Setup non-enumerable properties of the Date object itself.
+  InstallFunctions($Date, DONT_ENUM, $Array(
+    "UTC", DateUTC,
+    "parse", DateParse,
+    "now", DateNow
+  ));
+
+  // Setup non-enumerable constructor property of the Date prototype object.
+  %SetProperty($Date.prototype, "constructor", $Date, DONT_ENUM);
+
+  // Setup non-enumerable functions of the Date prototype object and
+  // set their names.
+  InstallFunctionsOnHiddenPrototype($Date.prototype, DONT_ENUM, $Array(
+    "toString", DateToString,
+    "toDateString", DateToDateString,
+    "toTimeString", DateToTimeString,
+    "toLocaleString", DateToLocaleString,
+    "toLocaleDateString", DateToLocaleDateString,
+    "toLocaleTimeString", DateToLocaleTimeString,
+    "valueOf", DateValueOf,
+    "getTime", DateGetTime,
+    "getFullYear", DateGetFullYear,
+    "getUTCFullYear", DateGetUTCFullYear,
+    "getMonth", DateGetMonth,
+    "getUTCMonth", DateGetUTCMonth,
+    "getDate", DateGetDate,
+    "getUTCDate", DateGetUTCDate,
+    "getDay", DateGetDay,
+    "getUTCDay", DateGetUTCDay,
+    "getHours", DateGetHours,
+    "getUTCHours", DateGetUTCHours,
+    "getMinutes", DateGetMinutes,
+    "getUTCMinutes", DateGetUTCMinutes,
+    "getSeconds", DateGetSeconds,
+    "getUTCSeconds", DateGetUTCSeconds,
+    "getMilliseconds", DateGetMilliseconds,
+    "getUTCMilliseconds", DateGetUTCMilliseconds,
+    "getTimezoneOffset", DateGetTimezoneOffset,
+    "setTime", DateSetTime,
+    "setMilliseconds", DateSetMilliseconds,
+    "setUTCMilliseconds", DateSetUTCMilliseconds,
+    "setSeconds", DateSetSeconds,
+    "setUTCSeconds", DateSetUTCSeconds,
+    "setMinutes", DateSetMinutes,
+    "setUTCMinutes", DateSetUTCMinutes,
+    "setHours", DateSetHours,
+    "setUTCHours", DateSetUTCHours,
+    "setDate", DateSetDate,
+    "setUTCDate", DateSetUTCDate,
+    "setMonth", DateSetMonth,
+    "setUTCMonth", DateSetUTCMonth,
+    "setFullYear", DateSetFullYear,
+    "setUTCFullYear", DateSetUTCFullYear,
+    "toGMTString", DateToGMTString,
+    "toUTCString", DateToUTCString,
+    "getYear", DateGetYear,
+    "setYear", DateSetYear,
+    "toISOString", DateToISOString,
+    "toJSON", DateToJSON
+  ));
+}
+
+SetupDate();
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
new file mode 100644
index 0000000..14d8c88
--- /dev/null
+++ b/src/debug-debugger.js
@@ -0,0 +1,2073 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Default number of frames to include in the response to backtrace request.
+const kDefaultBacktraceLength = 10;
+
+const Debug = {};
+
+// Regular expression to skip "crud" at the beginning of a source line which is
+// not really code. Currently the regular expression matches whitespace and
+// comments.
+const sourceLineBeginningSkip = /^(?:\s*(?:\/\*.*?\*\/)*)*/;
+
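
The pattern above strips leading whitespace and /* */ comments so that a line break point lands on real code. A standalone spot check (plain JavaScript, not part of the patch):

    var skip = /^(?:\s*(?:\/\*.*?\*\/)*)*/;
    var line = "  /* init */  x = 1;";
    line.match(skip)[0].length;   // 14 -- the column where "x = 1;" begins
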
+// Debug events which can occur in the V8 JavaScript engine. These originate
+// from the API include file debug.h.
+Debug.DebugEvent = { Break: 1,
+                     Exception: 2,
+                     NewFunction: 3,
+                     BeforeCompile: 4,
+                     AfterCompile: 5,
+                     ScriptCollected: 6 };
+
+// Types of exceptions that can be broken upon.
+Debug.ExceptionBreak = { All : 0,
+                         Uncaught: 1 };
+
+// The different types of steps.
+Debug.StepAction = { StepOut: 0,
+                     StepNext: 1,
+                     StepIn: 2,
+                     StepMin: 3,
+                     StepInMin: 4 };
+
+// The different types of scripts matching enum ScriptType in objects.h.
+Debug.ScriptType = { Native: 0,
+                     Extension: 1,
+                     Normal: 2 };
+
+// The different types of script compilations matching enum
+// Script::CompilationType in objects.h.
+Debug.ScriptCompilationType = { Host: 0,
+                                Eval: 1,
+                                JSON: 2 };
+
+// The different script break point types.
+Debug.ScriptBreakPointType = { ScriptId: 0,
+                               ScriptName: 1 };
+
+function ScriptTypeFlag(type) {
+  return (1 << type);
+}
+
+// Globals.
+var next_response_seq = 0;
+var next_break_point_number = 1;
+var break_points = [];
+var script_break_points = [];
+
+
+// Create a new break point object and add it to the list of break points.
+function MakeBreakPoint(source_position, opt_line, opt_column, opt_script_break_point) {
+  var break_point = new BreakPoint(source_position, opt_line, opt_column, opt_script_break_point);
+  break_points.push(break_point);
+  return break_point;
+}
+
+
+// Object representing a break point.
+// NOTE: This object does not hold a reference to the function that has the
+// break point, as that would prevent the function from being garbage collected
+// when it is no longer used. We do not want break points to keep functions alive.
+function BreakPoint(source_position, opt_line, opt_column, opt_script_break_point) {
+  this.source_position_ = source_position;
+  this.source_line_ = opt_line;
+  this.source_column_ = opt_column;
+  if (opt_script_break_point) {
+    this.script_break_point_ = opt_script_break_point;
+  } else {
+    this.number_ = next_break_point_number++;
+  }
+  this.hit_count_ = 0;
+  this.active_ = true;
+  this.condition_ = null;
+  this.ignoreCount_ = 0;
+}
+
+
+BreakPoint.prototype.number = function() {
+  return this.number_;
+};
+
+
+BreakPoint.prototype.func = function() {
+  return this.func_;
+};
+
+
+BreakPoint.prototype.source_position = function() {
+  return this.source_position_;
+};
+
+
+BreakPoint.prototype.hit_count = function() {
+  return this.hit_count_;
+};
+
+
+BreakPoint.prototype.active = function() {
+  if (this.script_break_point()) {
+    return this.script_break_point().active();
+  }
+  return this.active_;
+};
+
+
+BreakPoint.prototype.condition = function() {
+  if (this.script_break_point() && this.script_break_point().condition()) {
+    return this.script_break_point().condition();
+  }
+  return this.condition_;
+};
+
+
+BreakPoint.prototype.ignoreCount = function() {
+  return this.ignoreCount_;
+};
+
+
+BreakPoint.prototype.script_break_point = function() {
+  return this.script_break_point_;
+};
+
+
+BreakPoint.prototype.enable = function() {
+  this.active_ = true;
+};
+
+
+BreakPoint.prototype.disable = function() {
+  this.active_ = false;
+};
+
+
+BreakPoint.prototype.setCondition = function(condition) {
+  this.condition_ = condition;
+};
+
+
+BreakPoint.prototype.setIgnoreCount = function(ignoreCount) {
+  this.ignoreCount_ = ignoreCount;
+};
+
+
+BreakPoint.prototype.isTriggered = function(exec_state) {
+  // Break point not active - not triggered.
+  if (!this.active()) return false;
+
+  // Check for conditional break point.
+  if (this.condition()) {
+    // If break point has condition try to evaluate it in the top frame.
+    try {
+      var mirror = exec_state.frame(0).evaluate(this.condition());
+      // If there is no sensible mirror or the value is not true, do not trigger.
+      if (!(mirror instanceof ValueMirror) || !%ToBoolean(mirror.value_)) {
+        return false;
+      }
+    } catch (e) {
+      // Exception evaluating condition counts as not triggered.
+      return false;
+    }
+  }
+
+  // Update the hit count.
+  this.hit_count_++;
+  if (this.script_break_point_) {
+    this.script_break_point_.hit_count_++;
+  }
+
+  // If the break point has an ignore count it is not triggered.
+  if (this.ignoreCount_ > 0) {
+    this.ignoreCount_--;
+    return false;
+  }
+
+  // Break point triggered.
+  return true;
+};
+
+
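
A small standalone sketch of the triggering rules above (hedged: it constructs a BreakPoint directly rather than through MakeBreakPoint, and passes no execution state since no condition is set, so no runtime calls are reached on this path):

    var bp = new BreakPoint(0, 0, 0);
    bp.setIgnoreCount(2);
    // No condition, so isTriggered never consults the execution state argument.
    var hits = [bp.isTriggered(null), bp.isTriggered(null), bp.isTriggered(null)];
    // hits is [false, false, true]: the ignore count swallows the first two hits,
    // and the condition (when one is set) is checked before the count is decremented.
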
+// Function called from the runtime when a break point is hit. Returns true if
+// the break point is triggered and supposed to break execution.
+function IsBreakPointTriggered(break_id, break_point) {
+  return break_point.isTriggered(MakeExecutionState(break_id));
+}
+
+
+// Object representing a script break point. The script is referenced by its
+// script name or script id and the break point is represented as line and
+// column.
+function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
+                          opt_groupId) {
+  this.type_ = type;
+  if (type == Debug.ScriptBreakPointType.ScriptId) {
+    this.script_id_ = script_id_or_name;
+  } else {  // type == Debug.ScriptBreakPointType.ScriptName
+    this.script_name_ = script_id_or_name;
+  }
+  this.line_ = opt_line || 0;
+  this.column_ = opt_column;
+  this.groupId_ = opt_groupId;
+  this.hit_count_ = 0;
+  this.active_ = true;
+  this.condition_ = null;
+  this.ignoreCount_ = 0;
+}
+
+
+ScriptBreakPoint.prototype.number = function() {
+  return this.number_;
+};
+
+
+ScriptBreakPoint.prototype.groupId = function() {
+  return this.groupId_;
+};
+
+
+ScriptBreakPoint.prototype.type = function() {
+  return this.type_;
+};
+
+
+ScriptBreakPoint.prototype.script_id = function() {
+  return this.script_id_;
+};
+
+
+ScriptBreakPoint.prototype.script_name = function() {
+  return this.script_name_;
+};
+
+
+ScriptBreakPoint.prototype.line = function() {
+  return this.line_;
+};
+
+
+ScriptBreakPoint.prototype.column = function() {
+  return this.column_;
+};
+
+
+ScriptBreakPoint.prototype.hit_count = function() {
+  return this.hit_count_;
+};
+
+
+ScriptBreakPoint.prototype.active = function() {
+  return this.active_;
+};
+
+
+ScriptBreakPoint.prototype.condition = function() {
+  return this.condition_;
+};
+
+
+ScriptBreakPoint.prototype.ignoreCount = function() {
+  return this.ignoreCount_;
+};
+
+
+ScriptBreakPoint.prototype.enable = function() {
+  this.active_ = true;
+};
+
+
+ScriptBreakPoint.prototype.disable = function() {
+  this.active_ = false;
+};
+
+
+ScriptBreakPoint.prototype.setCondition = function(condition) {
+  this.condition_ = condition;
+};
+
+
+ScriptBreakPoint.prototype.setIgnoreCount = function(ignoreCount) {
+  this.ignoreCount_ = ignoreCount;
+
+  // Set ignore count on all break points created from this script break point.
+  for (var i = 0; i < break_points.length; i++) {
+    if (break_points[i].script_break_point() === this) {
+      break_points[i].setIgnoreCount(ignoreCount);
+    }
+  }
+};
+
+
+// Check whether a script matches this script break point. The script is matched
+// either by its id or by its name and line range.
+ScriptBreakPoint.prototype.matchesScript = function(script) {
+  if (this.type_ == Debug.ScriptBreakPointType.ScriptId) {
+    return this.script_id_ == script.id;
+  } else {  // this.type_ == Debug.ScriptBreakPointType.ScriptName
+    return this.script_name_ == script.name &&
+           script.line_offset <= this.line_  &&
+           this.line_ < script.line_offset + script.lineCount();
+  }
+};
+
+
+// Set the script break point in a script.
+ScriptBreakPoint.prototype.set = function (script) {
+  var column = this.column();
+  var line = this.line();
+  // If the column is undefined the break is on the line. To help locate the
+  // first piece of breakable code on the line, find the first column on the
+  // line that contains actual source.
+  if (IS_UNDEFINED(column)) {
+    var source_line = script.sourceLine(this.line());
+
+    // Allocate array for caching the columns where the actual source starts.
+    if (!script.sourceColumnStart_) {
+      script.sourceColumnStart_ = new Array(script.lineCount());
+    }
+
+    // Fill cache if needed and get column where the actual source starts.
+    if (IS_UNDEFINED(script.sourceColumnStart_[line])) {
+      script.sourceColumnStart_[line] =
+          source_line.match(sourceLineBeginningSkip)[0].length;
+    }
+    column = script.sourceColumnStart_[line];
+  }
+
+  // Convert the line and column into an absolute position within the script.
+  var pos = Debug.findScriptSourcePosition(script, this.line(), column);
+
+  // If the position is not found in the script (the script might be shorter
+  // than it used to be) just ignore it.
+  if (pos === null) return;
+
+  // Create a break point object and set the break point.
+  break_point = MakeBreakPoint(pos, this.line(), this.column(), this);
+  break_point.setIgnoreCount(this.ignoreCount());
+  %SetScriptBreakPoint(script, pos, break_point);
+
+  return break_point;
+};
+
+
+// Clear all the break points created from this script break point
+ScriptBreakPoint.prototype.clear = function () {
+  var remaining_break_points = [];
+  for (var i = 0; i < break_points.length; i++) {
+    if (break_points[i].script_break_point() &&
+        break_points[i].script_break_point() === this) {
+      %ClearBreakPoint(break_points[i]);
+    } else {
+      remaining_break_points.push(break_points[i]);
+    }
+  }
+  break_points = remaining_break_points;
+};
+
+
+// Function called from runtime when a new script is compiled to set any script
+// break points set in this script.
+function UpdateScriptBreakPoints(script) {
+  for (var i = 0; i < script_break_points.length; i++) {
+    if (script_break_points[i].type() == Debug.ScriptBreakPointType.ScriptName &&
+        script_break_points[i].matchesScript(script)) {
+      script_break_points[i].set(script);
+    }
+  }
+}
+
+
+Debug.setListener = function(listener, opt_data) {
+  if (!IS_FUNCTION(listener) && !IS_UNDEFINED(listener) && !IS_NULL(listener)) {
+    throw new Error('Parameters have wrong types.');
+  }
+  %SetDebugEventListener(listener, opt_data);
+};
+
+
+Debug.breakExecution = function(f) {
+  %Break();
+};
+
+Debug.breakLocations = function(f) {
+  if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+  return %GetBreakLocations(f);
+};
+
+// Returns a Script object. If the parameter is a function the return value
+// is the script in which the function is defined. If the parameter is a string
+// the return value is the script for which the script name has that string
+// value.  If it is a regexp and there is a unique script whose name matches
+// we return that, otherwise undefined.
+Debug.findScript = function(func_or_script_name) {
+  if (IS_FUNCTION(func_or_script_name)) {
+    return %FunctionGetScript(func_or_script_name);
+  } else if (IS_REGEXP(func_or_script_name)) {
+    var scripts = Debug.scripts();
+    var last_result = null;
+    var result_count = 0;
+    for (var i in scripts) {
+      var script = scripts[i];
+      if (func_or_script_name.test(script.name)) {
+        last_result = script;
+        result_count++;
+      }
+    }
+    // Return the unique script matching the regexp.  If there is more
+    // than one we don't return a value since there is no good way to
+    // decide which one to return.  Returning a "random" one, say the
+    // first, would introduce nondeterminism (or something close to it)
+    // because the order is the heap iteration order.
+    if (result_count == 1) {
+      return last_result;
+    } else {
+      return undefined;
+    }
+  } else {
+    return %GetScript(func_or_script_name);
+  }
+};
+
+// Returns the script source. If the parameter is a function the return value
+// is the script source for the script in which the function is defined. If the
+// parameter is a string the return value is the script for which the script
+// name has that string value.
+Debug.scriptSource = function(func_or_script_name) {
+  return this.findScript(func_or_script_name).source;
+};
+
+Debug.source = function(f) {
+  if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+  return %FunctionGetSourceCode(f);
+};
+
+Debug.disassemble = function(f) {
+  if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+  return %DebugDisassembleFunction(f);
+};
+
+Debug.disassembleConstructor = function(f) {
+  if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+  return %DebugDisassembleConstructor(f);
+};
+
+Debug.sourcePosition = function(f) {
+  if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+  return %FunctionGetScriptSourcePosition(f);
+};
+
+
+Debug.findFunctionSourceLocation = function(func, opt_line, opt_column) {
+  var script = %FunctionGetScript(func);
+  var script_offset = %FunctionGetScriptSourcePosition(func);
+  return script.locationFromLine(opt_line, opt_column, script_offset);
+}
+
+
+// Returns the character position in a script based on a line number and an
+// optional position within that line.
+Debug.findScriptSourcePosition = function(script, opt_line, opt_column) {
+  var location = script.locationFromLine(opt_line, opt_column);
+  return location ? location.position : null;
+}
+
+
+Debug.findBreakPoint = function(break_point_number, remove) {
+  var break_point;
+  for (var i = 0; i < break_points.length; i++) {
+    if (break_points[i].number() == break_point_number) {
+      break_point = break_points[i];
+      // Remove the break point from the list if requested.
+      if (remove) {
+        break_points.splice(i, 1);
+      }
+      break;
+    }
+  }
+  if (break_point) {
+    return break_point;
+  } else {
+    return this.findScriptBreakPoint(break_point_number, remove);
+  }
+};
+
+
+Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) {
+  if (!IS_FUNCTION(func)) throw new Error('Parameters have wrong types.');
+  // Break points in API functions are not supported.
+  if (%FunctionIsAPIFunction(func)) {
+    throw new Error('Cannot set break point in native code.');
+  }
+  // Find source position relative to start of the function
+  var break_position =
+      this.findFunctionSourceLocation(func, opt_line, opt_column).position;
+  var source_position = break_position - this.sourcePosition(func);
+  // Find the script for the function.
+  var script = %FunctionGetScript(func);
+  // Break in builtin JavaScript code is not supported.
+  if (script.type == Debug.ScriptType.Native) {
+    throw new Error('Cannot set break point in native code.');
+  }
+  // If the script for the function has a name convert this to a script break
+  // point.
+  if (script && script.id) {
+    // Adjust the source position to be script relative.
+    source_position += %FunctionGetScriptSourcePosition(func);
+    // Find line and column for the position in the script and set a script
+    // break point from that.
+    var location = script.locationFromPosition(source_position, false);
+    return this.setScriptBreakPointById(script.id,
+                                        location.line, location.column,
+                                        opt_condition);
+  } else {
+    // Set a break point directly on the function.
+    var break_point = MakeBreakPoint(source_position, opt_line, opt_column);
+    %SetFunctionBreakPoint(func, source_position, break_point);
+    break_point.setCondition(opt_condition);
+    return break_point.number();
+  }
+};
+
+
+Debug.enableBreakPoint = function(break_point_number) {
+  var break_point = this.findBreakPoint(break_point_number, false);
+  break_point.enable();
+};
+
+
+Debug.disableBreakPoint = function(break_point_number) {
+  var break_point = this.findBreakPoint(break_point_number, false);
+  break_point.disable();
+};
+
+
+Debug.changeBreakPointCondition = function(break_point_number, condition) {
+  var break_point = this.findBreakPoint(break_point_number, false);
+  break_point.setCondition(condition);
+};
+
+
+Debug.changeBreakPointIgnoreCount = function(break_point_number, ignoreCount) {
+  if (ignoreCount < 0) {
+    throw new Error('Invalid argument');
+  }
+  var break_point = this.findBreakPoint(break_point_number, false);
+  break_point.setIgnoreCount(ignoreCount);
+};
+
+
+Debug.clearBreakPoint = function(break_point_number) {
+  var break_point = this.findBreakPoint(break_point_number, true);
+  if (break_point) {
+    return %ClearBreakPoint(break_point);
+  } else {
+    break_point = this.findScriptBreakPoint(break_point_number, true);
+    if (!break_point) {
+      throw new Error('Invalid breakpoint');
+    }
+  }
+};
+
+
+Debug.clearAllBreakPoints = function() {
+  for (var i = 0; i < break_points.length; i++) {
+    break_point = break_points[i];
+    %ClearBreakPoint(break_point);
+  }
+  break_points = [];
+};
+
+
+Debug.findScriptBreakPoint = function(break_point_number, remove) {
+  var script_break_point;
+  for (var i = 0; i < script_break_points.length; i++) {
+    if (script_break_points[i].number() == break_point_number) {
+      script_break_point = script_break_points[i];
+      // Remove the break point from the list if requested.
+      if (remove) {
+        script_break_point.clear();
+        script_break_points.splice(i,1);
+      }
+      break;
+    }
+  }
+  return script_break_point;
+}
+
+
+// Sets a breakpoint in a script identified through id or name at the
+// specified source line and column within that line.
+Debug.setScriptBreakPoint = function(type, script_id_or_name,
+                                     opt_line, opt_column, opt_condition,
+                                     opt_groupId) {
+  // Create script break point object.
+  var script_break_point =
+      new ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
+                           opt_groupId);
+
+  // Assign number to the new script break point and add it.
+  script_break_point.number_ = next_break_point_number++;
+  script_break_point.setCondition(opt_condition);
+  script_break_points.push(script_break_point);
+
+  // Run through all scripts to see if this script break point matches any
+  // loaded scripts.
+  var scripts = this.scripts();
+  for (var i = 0; i < scripts.length; i++) {
+    if (script_break_point.matchesScript(scripts[i])) {
+      script_break_point.set(scripts[i]);
+    }
+  }
+
+  return script_break_point.number();
+}
+
+
+Debug.setScriptBreakPointById = function(script_id,
+                                         opt_line, opt_column,
+                                         opt_condition, opt_groupId) {
+  return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
+                                  script_id, opt_line, opt_column,
+                                  opt_condition, opt_groupId);
+}
+
+
+Debug.setScriptBreakPointByName = function(script_name,
+                                           opt_line, opt_column,
+                                           opt_condition, opt_groupId) {
+  return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptName,
+                                  script_name, opt_line, opt_column,
+                                  opt_condition, opt_groupId);
+}
+
+
+Debug.enableScriptBreakPoint = function(break_point_number) {
+  var script_break_point = this.findScriptBreakPoint(break_point_number, false);
+  script_break_point.enable();
+};
+
+
+Debug.disableScriptBreakPoint = function(break_point_number) {
+  var script_break_point = this.findScriptBreakPoint(break_point_number, false);
+  script_break_point.disable();
+};
+
+
+Debug.changeScriptBreakPointCondition = function(break_point_number, condition) {
+  var script_break_point = this.findScriptBreakPoint(break_point_number, false);
+  script_break_point.setCondition(condition);
+};
+
+
+Debug.changeScriptBreakPointIgnoreCount = function(break_point_number, ignoreCount) {
+  if (ignoreCount < 0) {
+    throw new Error('Invalid argument');
+  }
+  var script_break_point = this.findScriptBreakPoint(break_point_number, false);
+  script_break_point.setIgnoreCount(ignoreCount);
+};
+
+
+Debug.scriptBreakPoints = function() {
+  return script_break_points;
+}
+
+
+Debug.clearStepping = function() {
+  %ClearStepping();
+}
+
+Debug.setBreakOnException = function() {
+  return %ChangeBreakOnException(Debug.ExceptionBreak.All, true);
+};
+
+Debug.clearBreakOnException = function() {
+  return %ChangeBreakOnException(Debug.ExceptionBreak.All, false);
+};
+
+Debug.setBreakOnUncaughtException = function() {
+  return %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, true);
+};
+
+Debug.clearBreakOnUncaughtException = function() {
+  return %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, false);
+};
+
+Debug.showBreakPoints = function(f, full) {
+  if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+  var source = full ? this.scriptSource(f) : this.source(f);
+  var offset = full ? this.sourcePosition(f) : 0;
+  var locations = this.breakLocations(f);
+  if (!locations) return source;
+  locations.sort(function(x, y) { return x - y; });
+  var result = "";
+  var prev_pos = 0;
+  var pos;
+  for (var i = 0; i < locations.length; i++) {
+    pos = locations[i] - offset;
+    result += source.slice(prev_pos, pos);
+    result += "[B" + i + "]";
+    prev_pos = pos;
+  }
+  pos = source.length;
+  result += source.substring(prev_pos, pos);
+  return result;
+};
+
+
+// Get all the scripts currently loaded. Locating all the scripts is based on
+// scanning the heap.
+Debug.scripts = function() {
+  // Collect all scripts in the heap.
+  return %DebugGetLoadedScripts();
+}
+
+function MakeExecutionState(break_id) {
+  return new ExecutionState(break_id);
+}
+
+function ExecutionState(break_id) {
+  this.break_id = break_id;
+  this.selected_frame = 0;
+}
+
+ExecutionState.prototype.prepareStep = function(opt_action, opt_count) {
+  var action = Debug.StepAction.StepIn;
+  if (!IS_UNDEFINED(opt_action)) action = %ToNumber(opt_action);
+  var count = opt_count ? %ToNumber(opt_count) : 1;
+
+  return %PrepareStep(this.break_id, action, count);
+}
+
+ExecutionState.prototype.evaluateGlobal = function(source, disable_break) {
+  return MakeMirror(
+      %DebugEvaluateGlobal(this.break_id, source, Boolean(disable_break)));
+};
+
+ExecutionState.prototype.frameCount = function() {
+  return %GetFrameCount(this.break_id);
+};
+
+ExecutionState.prototype.threadCount = function() {
+  return %GetThreadCount(this.break_id);
+};
+
+ExecutionState.prototype.frame = function(opt_index) {
+  // If no index supplied return the selected frame.
+  if (opt_index == null) opt_index = this.selected_frame;
+  return new FrameMirror(this.break_id, opt_index);
+};
+
+ExecutionState.prototype.cframesValue = function(opt_from_index, opt_to_index) {
+  return %GetCFrames(this.break_id);
+};
+
+ExecutionState.prototype.setSelectedFrame = function(index) {
+  var i = %ToNumber(index);
+  if (i < 0 || i >= this.frameCount()) throw new Error('Illegal frame index.');
+  this.selected_frame = i;
+};
+
+ExecutionState.prototype.selectedFrame = function() {
+  return this.selected_frame;
+};
+
+ExecutionState.prototype.debugCommandProcessor = function(opt_is_running) {
+  return new DebugCommandProcessor(this, opt_is_running);
+};
+
+
+function MakeBreakEvent(exec_state, break_points_hit) {
+  return new BreakEvent(exec_state, break_points_hit);
+}
+
+
+function BreakEvent(exec_state, break_points_hit) {
+  this.exec_state_ = exec_state;
+  this.break_points_hit_ = break_points_hit;
+}
+
+
+BreakEvent.prototype.executionState = function() {
+  return this.exec_state_;
+};
+
+
+BreakEvent.prototype.eventType = function() {
+  return Debug.DebugEvent.Break;
+};
+
+
+BreakEvent.prototype.func = function() {
+  return this.exec_state_.frame(0).func();
+};
+
+
+BreakEvent.prototype.sourceLine = function() {
+  return this.exec_state_.frame(0).sourceLine();
+};
+
+
+BreakEvent.prototype.sourceColumn = function() {
+  return this.exec_state_.frame(0).sourceColumn();
+};
+
+
+BreakEvent.prototype.sourceLineText = function() {
+  return this.exec_state_.frame(0).sourceLineText();
+};
+
+
+BreakEvent.prototype.breakPointsHit = function() {
+  return this.break_points_hit_;
+};
+
+
+BreakEvent.prototype.toJSONProtocol = function() {
+  var o = { seq: next_response_seq++,
+            type: "event",
+            event: "break",
+            body: { invocationText: this.exec_state_.frame(0).invocationText(),
+                  }
+          };
+
+  // Add script related information to the event if available.
+  var script = this.func().script();
+  if (script) {
+    o.body.sourceLine = this.sourceLine();
+    o.body.sourceColumn = this.sourceColumn();
+    o.body.sourceLineText = this.sourceLineText();
+    o.body.script = MakeScriptObject_(script, false);
+  }
+
+  // Add an Array of break points hit if any.
+  if (this.breakPointsHit()) {
+    o.body.breakpoints = [];
+    for (var i = 0; i < this.breakPointsHit().length; i++) {
+      // Find the break point number. For break points originating from a
+      // script break point supply the script break point number.
+      var breakpoint = this.breakPointsHit()[i];
+      var script_break_point = breakpoint.script_break_point();
+      var number;
+      if (script_break_point) {
+        number = script_break_point.number();
+      } else {
+        number = breakpoint.number();
+      }
+      o.body.breakpoints.push(number);
+    }
+  }
+  return JSON.stringify(ObjectToProtocolObject_(o));
+};
+
+
+function MakeExceptionEvent(exec_state, exception, uncaught) {
+  return new ExceptionEvent(exec_state, exception, uncaught);
+}
+
+
+function ExceptionEvent(exec_state, exception, uncaught) {
+  this.exec_state_ = exec_state;
+  this.exception_ = exception;
+  this.uncaught_ = uncaught;
+}
+
+
+ExceptionEvent.prototype.executionState = function() {
+  return this.exec_state_;
+};
+
+
+ExceptionEvent.prototype.eventType = function() {
+  return Debug.DebugEvent.Exception;
+};
+
+
+ExceptionEvent.prototype.exception = function() {
+  return this.exception_;
+}
+
+
+ExceptionEvent.prototype.uncaught = function() {
+  return this.uncaught_;
+}
+
+
+ExceptionEvent.prototype.func = function() {
+  return this.exec_state_.frame(0).func();
+};
+
+
+ExceptionEvent.prototype.sourceLine = function() {
+  return this.exec_state_.frame(0).sourceLine();
+};
+
+
+ExceptionEvent.prototype.sourceColumn = function() {
+  return this.exec_state_.frame(0).sourceColumn();
+};
+
+
+ExceptionEvent.prototype.sourceLineText = function() {
+  return this.exec_state_.frame(0).sourceLineText();
+};
+
+
+ExceptionEvent.prototype.toJSONProtocol = function() {
+  var o = new ProtocolMessage();
+  o.event = "exception";
+  o.body = { uncaught: this.uncaught_,
+             exception: MakeMirror(this.exception_)
+           };
+
+  // Exceptions might happen without any JavaScript frames.
+  if (this.exec_state_.frameCount() > 0) {
+    o.body.sourceLine = this.sourceLine();
+    o.body.sourceColumn = this.sourceColumn();
+    o.body.sourceLineText = this.sourceLineText();
+
+    // Add script information to the event if available.
+    var script = this.func().script();
+    if (script) {
+      o.body.script = MakeScriptObject_(script, false);
+    }
+  } else {
+    o.body.sourceLine = -1;
+  }
+
+  return o.toJSONProtocol();
+};
+
+
+function MakeCompileEvent(exec_state, script, before) {
+  return new CompileEvent(exec_state, script, before);
+}
+
+
+function CompileEvent(exec_state, script, before) {
+  this.exec_state_ = exec_state;
+  this.script_ = MakeMirror(script);
+  this.before_ = before;
+}
+
+
+CompileEvent.prototype.executionState = function() {
+  return this.exec_state_;
+};
+
+
+CompileEvent.prototype.eventType = function() {
+  if (this.before_) {
+    return Debug.DebugEvent.BeforeCompile;
+  } else {
+    return Debug.DebugEvent.AfterCompile;
+  }
+};
+
+
+CompileEvent.prototype.script = function() {
+  return this.script_;
+};
+
+
+CompileEvent.prototype.toJSONProtocol = function() {
+  var o = new ProtocolMessage();
+  o.running = true;
+  if (this.before_) {
+    o.event = "beforeCompile";
+  } else {
+    o.event = "afterCompile";
+  }
+  o.body = {};
+  o.body.script = this.script_;
+
+  return o.toJSONProtocol();
+}
+
+
+function MakeNewFunctionEvent(func) {
+  return new NewFunctionEvent(func);
+}
+
+
+function NewFunctionEvent(func) {
+  this.func = func;
+}
+
+
+NewFunctionEvent.prototype.eventType = function() {
+  return Debug.DebugEvent.NewFunction;
+};
+
+
+NewFunctionEvent.prototype.name = function() {
+  return this.func.name;
+};
+
+
+NewFunctionEvent.prototype.setBreakPoint = function(p) {
+  Debug.setBreakPoint(this.func, p || 0);
+};
+
+
+function MakeScriptCollectedEvent(exec_state, id) {
+  return new ScriptCollectedEvent(exec_state, id);
+}
+
+
+function ScriptCollectedEvent(exec_state, id) {
+  this.exec_state_ = exec_state;
+  this.id_ = id;
+}
+
+
+ScriptCollectedEvent.prototype.id = function() {
+  return this.id_;
+};
+
+
+ScriptCollectedEvent.prototype.executionState = function() {
+  return this.exec_state_;
+};
+
+
+ScriptCollectedEvent.prototype.toJSONProtocol = function() {
+  var o = new ProtocolMessage();
+  o.running = true;
+  o.event = "scriptCollected";
+  o.body = {};
+  o.body.script = { id: this.id() };
+  return o.toJSONProtocol();
+}
+
+
+function MakeScriptObject_(script, include_source) {
+  var o = { id: script.id(),
+            name: script.name(),
+            lineOffset: script.lineOffset(),
+            columnOffset: script.columnOffset(),
+            lineCount: script.lineCount(),
+          };
+  if (!IS_UNDEFINED(script.data())) {
+    o.data = script.data();
+  }
+  if (include_source) {
+    o.source = script.source();
+  }
+  return o;
+};
+
+
+function DebugCommandProcessor(exec_state, opt_is_running) {
+  this.exec_state_ = exec_state;
+  this.running_ = opt_is_running || false;
+};
+
+
+DebugCommandProcessor.prototype.processDebugRequest = function (request) {
+  return this.processDebugJSONRequest(request);
+}
+
+
+function ProtocolMessage(request) {
+  // Update sequence number.
+  this.seq = next_response_seq++;
+
+  if (request) {
+    // If the message is based on a request, this is a response. Fill the
+    // initial response from the request.
+    this.type = 'response';
+    this.request_seq = request.seq;
+    this.command = request.command;
+  } else {
+    // Otherwise it is a debugger-generated event.
+    this.type = 'event';
+  }
+  this.success = true;
+  // Handler may set this field to control debugger state.
+  this.running = undefined;
+}
+
+
+ProtocolMessage.prototype.setOption = function(name, value) {
+  if (!this.options_) {
+    this.options_ = {};
+  }
+  this.options_[name] = value;
+}
+
+
+ProtocolMessage.prototype.failed = function(message) {
+  this.success = false;
+  this.message = message;
+}
+
+
+ProtocolMessage.prototype.toJSONProtocol = function() {
+  // Encode the protocol header.
+  var json = {};
+  json.seq= this.seq;
+  if (this.request_seq) {
+    json.request_seq = this.request_seq;
+  }
+  json.type = this.type;
+  if (this.event) {
+    json.event = this.event;
+  }
+  if (this.command) {
+    json.command = this.command;
+  }
+  if (this.success) {
+    json.success = this.success;
+  } else {
+    json.success = false;
+  }
+  if (this.body) {
+    // Encode the body part.
+    var bodyJson;
+    var serializer = MakeMirrorSerializer(true, this.options_);
+    if (this.body instanceof Mirror) {
+      bodyJson = serializer.serializeValue(this.body);
+    } else if (this.body instanceof Array) {
+      bodyJson = [];
+      for (var i = 0; i < this.body.length; i++) {
+        if (this.body[i] instanceof Mirror) {
+          bodyJson.push(serializer.serializeValue(this.body[i]));
+        } else {
+          bodyJson.push(ObjectToProtocolObject_(this.body[i], serializer));
+        }
+      }
+    } else {
+      bodyJson = ObjectToProtocolObject_(this.body, serializer);
+    }
+    json.body = bodyJson;
+    json.refs = serializer.serializeReferencedObjects();
+  }
+  if (this.message) {
+    json.message = this.message;
+  }
+  json.running = this.running;
+  return JSON.stringify(json);
+}
+
+
+DebugCommandProcessor.prototype.createResponse = function(request) {
+  return new ProtocolMessage(request);
+};
+
+
+DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request) {
+  var request;  // Current request.
+  var response;  // Generated response.
+  try {
+    try {
+      // Convert the JSON string to an object.
+      request = %CompileString('(' + json_request + ')', false)();
+
+      // Create an initial response.
+      response = this.createResponse(request);
+
+      if (!request.type) {
+        throw new Error('Type not specified');
+      }
+
+      if (request.type != 'request') {
+        throw new Error("Illegal type '" + request.type + "' in request");
+      }
+
+      if (!request.command) {
+        throw new Error('Command not specified');
+      }
+
+      // TODO(yurys): remove request.arguments.compactFormat check once
+      // ChromeDevTools are switched to 'inlineRefs'
+      if (request.arguments && (request.arguments.inlineRefs ||
+                                request.arguments.compactFormat)) {
+        response.setOption('inlineRefs', true);
+      }
+
+      if (request.command == 'continue') {
+        this.continueRequest_(request, response);
+      } else if (request.command == 'break') {
+        this.breakRequest_(request, response);
+      } else if (request.command == 'setbreakpoint') {
+        this.setBreakPointRequest_(request, response);
+      } else if (request.command == 'changebreakpoint') {
+        this.changeBreakPointRequest_(request, response);
+      } else if (request.command == 'clearbreakpoint') {
+        this.clearBreakPointRequest_(request, response);
+      } else if (request.command == 'clearbreakpointgroup') {
+        this.clearBreakPointGroupRequest_(request, response);
+      } else if (request.command == 'backtrace') {
+        this.backtraceRequest_(request, response);
+      } else if (request.command == 'frame') {
+        this.frameRequest_(request, response);
+      } else if (request.command == 'scopes') {
+        this.scopesRequest_(request, response);
+      } else if (request.command == 'scope') {
+        this.scopeRequest_(request, response);
+      } else if (request.command == 'evaluate') {
+        this.evaluateRequest_(request, response);
+      } else if (request.command == 'lookup') {
+        this.lookupRequest_(request, response);
+      } else if (request.command == 'references') {
+        this.referencesRequest_(request, response);
+      } else if (request.command == 'source') {
+        this.sourceRequest_(request, response);
+      } else if (request.command == 'scripts') {
+        this.scriptsRequest_(request, response);
+      } else if (request.command == 'threads') {
+        this.threadsRequest_(request, response);
+      } else if (request.command == 'suspend') {
+        this.suspendRequest_(request, response);
+      } else if (request.command == 'version') {
+        this.versionRequest_(request, response);
+      } else if (request.command == 'profile') {
+        this.profileRequest_(request, response);
+      } else {
+        throw new Error('Unknown command "' + request.command + '" in request');
+      }
+    } catch (e) {
+      // If there is no response object, create one (without a command).
+      if (!response) {
+        response = this.createResponse();
+      }
+      response.success = false;
+      response.message = %ToString(e);
+    }
+
+    // Return the response as a JSON encoded string.
+    try {
+      if (!IS_UNDEFINED(response.running)) {
+        // Response controls running state.
+        this.running_ = response.running;
+      }
+      response.running = this.running_; 
+      return response.toJSONProtocol();
+    } catch (e) {
+      // Failed to generate response - return generic error.
+      return '{"seq":' + response.seq + ',' +
+              '"request_seq":' + request.seq + ',' +
+              '"type":"response",' +
+              '"success":false,' +
+              '"message":"Internal error: ' + %ToString(e) + '"}';
+    }
+  } catch (e) {
+    // Failed in one of the catch blocks above - most generic error.
+    return '{"seq":0,"type":"response","success":false,"message":"Internal error"}';
+  }
+};
+
+
+DebugCommandProcessor.prototype.continueRequest_ = function(request, response) {
+  // Check for arguments for continue.
+  if (request.arguments) {
+    var count = 1;
+    var action = Debug.StepAction.StepIn;
+
+    // Pull out arguments.
+    var stepaction = request.arguments.stepaction;
+    var stepcount = request.arguments.stepcount;
+
+    // Get the stepcount argument if any.
+    if (stepcount) {
+      count = %ToNumber(stepcount);
+      if (count < 0) {
+        throw new Error('Invalid stepcount argument "' + stepcount + '".');
+      }
+    }
+
+    // Get the stepaction argument.
+    if (stepaction) {
+      if (stepaction == 'in') {
+        action = Debug.StepAction.StepIn;
+      } else if (stepaction == 'min') {
+        action = Debug.StepAction.StepMin;
+      } else if (stepaction == 'next') {
+        action = Debug.StepAction.StepNext;
+      } else if (stepaction == 'out') {
+        action = Debug.StepAction.StepOut;
+      } else {
+        throw new Error('Invalid stepaction argument "' + stepaction + '".');
+      }
+    }
+
+    // Set up the VM for stepping.
+    this.exec_state_.prepareStep(action, count);
+  }
+
+  // VM should be running after executing this request.
+  response.running = true;
+};
+
+
+DebugCommandProcessor.prototype.breakRequest_ = function(request, response) {
+  // Ignore; the break command does nothing when execution is already broken.
+};
+
+
+DebugCommandProcessor.prototype.setBreakPointRequest_ =
+    function(request, response) {
+  // Check for legal request.
+  if (!request.arguments) {
+    response.failed('Missing arguments');
+    return;
+  }
+
+  // Pull out arguments.
+  var type = request.arguments.type;
+  var target = request.arguments.target;
+  var line = request.arguments.line;
+  var column = request.arguments.column;
+  var enabled = IS_UNDEFINED(request.arguments.enabled) ?
+      true : request.arguments.enabled;
+  var condition = request.arguments.condition;
+  var ignoreCount = request.arguments.ignoreCount;
+  var groupId = request.arguments.groupId;
+
+  // Check for legal arguments.
+  if (!type || IS_UNDEFINED(target)) {
+    response.failed('Missing argument "type" or "target"');
+    return;
+  }
+  if (type != 'function' && type != 'handle' &&
+      type != 'script' && type != 'scriptId') {
+    response.failed('Illegal type "' + type + '"');
+    return;
+  }
+
+  // Either function or script break point.
+  var break_point_number;
+  if (type == 'function') {
+    // Handle function break point.
+    if (!IS_STRING(target)) {
+      response.failed('Argument "target" is not a string value');
+      return;
+    }
+    var f;
+    try {
+      // Find the function through a global evaluate.
+      f = this.exec_state_.evaluateGlobal(target).value();
+    } catch (e) {
+      response.failed('Error: "' + %ToString(e) +
+                      '" evaluating "' + target + '"');
+      return;
+    }
+    if (!IS_FUNCTION(f)) {
+      response.failed('"' + target + '" does not evaluate to a function');
+      return;
+    }
+
+    // Set function break point.
+    break_point_number = Debug.setBreakPoint(f, line, column, condition);
+  } else if (type == 'handle') {
+    // Find the object pointed by the specified handle.
+    var handle = parseInt(target, 10);
+    var mirror = LookupMirror(handle);
+    if (!mirror) {
+      return response.failed('Object #' + handle + '# not found');
+    }
+    if (!mirror.isFunction()) {
+      return response.failed('Object #' + handle + '# is not a function');
+    }
+
+    // Set function break point.
+    break_point_number = Debug.setBreakPoint(mirror.value(),
+                                             line, column, condition);
+  } else if (type == 'script') {
+    // set script break point.
+    break_point_number =
+        Debug.setScriptBreakPointByName(target, line, column, condition,
+                                        groupId);
+  } else {  // type == 'scriptId'.
+    break_point_number =
+        Debug.setScriptBreakPointById(target, line, column, condition, groupId);
+  }
+
+  // Set additional break point properties.
+  var break_point = Debug.findBreakPoint(break_point_number);
+  if (ignoreCount) {
+    Debug.changeBreakPointIgnoreCount(break_point_number, ignoreCount);
+  }
+  if (!enabled) {
+    Debug.disableBreakPoint(break_point_number);
+  }
+
+  // Add the break point number to the response.
+  response.body = { type: type,
+                    breakpoint: break_point_number }
+
+  // Add break point information to the response.
+  if (break_point instanceof ScriptBreakPoint) {
+    if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
+      response.body.type = 'scriptId';
+      response.body.script_id = break_point.script_id();
+    } else {
+      response.body.type = 'scriptName';
+      response.body.script_name = break_point.script_name();
+    }
+    response.body.line = break_point.line();
+    response.body.column = break_point.column();
+  } else {
+    response.body.type = 'function';
+  }
+};
+
+
+DebugCommandProcessor.prototype.changeBreakPointRequest_ = function(request, response) {
+  // Check for legal request.
+  if (!request.arguments) {
+    response.failed('Missing arguments');
+    return;
+  }
+
+  // Pull out arguments.
+  var break_point = %ToNumber(request.arguments.breakpoint);
+  var enabled = request.arguments.enabled;
+  var condition = request.arguments.condition;
+  var ignoreCount = request.arguments.ignoreCount;
+
+  // Check for legal arguments.
+  if (!break_point) {
+    response.failed('Missing argument "breakpoint"');
+    return;
+  }
+
+  // Change enabled state if supplied.
+  if (!IS_UNDEFINED(enabled)) {
+    if (enabled) {
+      Debug.enableBreakPoint(break_point);
+    } else {
+      Debug.disableBreakPoint(break_point);
+    }
+  }
+
+  // Change condition if supplied
+  if (!IS_UNDEFINED(condition)) {
+    Debug.changeBreakPointCondition(break_point, condition);
+  }
+
+  // Change ignore count if supplied
+  if (!IS_UNDEFINED(ignoreCount)) {
+    Debug.changeBreakPointIgnoreCount(break_point, ignoreCount);
+  }
+}
+
+
+DebugCommandProcessor.prototype.clearBreakPointGroupRequest_ = function(request, response) {
+  // Check for legal request.
+  if (!request.arguments) {
+    response.failed('Missing arguments');
+    return;
+  }
+
+  // Pull out arguments.
+  var group_id = request.arguments.groupId;
+
+  // Check for legal arguments.
+  if (!group_id) {
+    response.failed('Missing argument "groupId"');
+    return;
+  }
+
+  var cleared_break_points = [];
+  var new_script_break_points = [];
+  for (var i = 0; i < script_break_points.length; i++) {
+    var next_break_point = script_break_points[i];
+    if (next_break_point.groupId() == group_id) {
+      cleared_break_points.push(next_break_point.number());
+      next_break_point.clear();
+    } else {
+      new_script_break_points.push(next_break_point);
+    }
+  }
+  script_break_points = new_script_break_points;
+
+  // Add the cleared break point numbers to the response.
+  response.body = { breakpoints: cleared_break_points };
+}
+
+
+DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(request, response) {
+  // Check for legal request.
+  if (!request.arguments) {
+    response.failed('Missing arguments');
+    return;
+  }
+
+  // Pull out arguments.
+  var break_point = %ToNumber(request.arguments.breakpoint);
+
+  // Check for legal arguments.
+  if (!break_point) {
+    response.failed('Missing argument "breakpoint"');
+    return;
+  }
+
+  // Clear break point.
+  Debug.clearBreakPoint(break_point);
+
+  // Add the cleared break point number to the response.
+  response.body = { breakpoint: break_point }
+}
+
+
+DebugCommandProcessor.prototype.backtraceRequest_ = function(request, response) {
+  // Get the number of frames.
+  var total_frames = this.exec_state_.frameCount();
+
+  // Create simple response if there are no frames.
+  if (total_frames == 0) {
+    response.body = {
+      totalFrames: total_frames
+    }
+    return;
+  }
+
+  // Default frame range to include in backtrace.
+  var from_index = 0
+  var to_index = kDefaultBacktraceLength;
+
+  // Get the range from the arguments.
+  if (request.arguments) {
+    if (request.arguments.fromFrame) {
+      from_index = request.arguments.fromFrame;
+    }
+    if (request.arguments.toFrame) {
+      to_index = request.arguments.toFrame;
+    }
+    if (request.arguments.bottom) {
+      var tmp_index = total_frames - from_index;
+      from_index = total_frames - to_index
+      to_index = tmp_index;
+    }
+    if (from_index < 0 || to_index < 0) {
+      return response.failed('Invalid frame number');
+    }
+  }
+
+  // Adjust the index.
+  to_index = Math.min(total_frames, to_index);
+
+  if (to_index <= from_index) {
+    var error = 'Invalid frame range';
+    return response.failed(error);
+  }
+
+  // Create the response body.
+  var frames = [];
+  for (var i = from_index; i < to_index; i++) {
+    frames.push(this.exec_state_.frame(i));
+  }
+  response.body = {
+    fromFrame: from_index,
+    toFrame: to_index,
+    totalFrames: total_frames,
+    frames: frames
+  }
+};
+
+
+DebugCommandProcessor.prototype.backtracec = function(cmd, args) {
+  return this.exec_state_.cframesValue();
+};
+
+
+DebugCommandProcessor.prototype.frameRequest_ = function(request, response) {
+  // No frames, no source.
+  if (this.exec_state_.frameCount() == 0) {
+    return response.failed('No frames');
+  }
+
+  // With no arguments just keep the selected frame.
+  if (request.arguments) {
+    var index = request.arguments.number;
+    if (index < 0 || this.exec_state_.frameCount() <= index) {
+      return response.failed('Invalid frame number');
+    }
+
+    this.exec_state_.setSelectedFrame(request.arguments.number);
+  }
+  response.body = this.exec_state_.frame();
+};
+
+
+DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
+  // Get the frame for which the scope or scopes are requested. With no
+  // frameNumber argument, use the currently selected frame.
+  if (request.arguments && !IS_UNDEFINED(request.arguments.frameNumber)) {
+    var frame_index = request.arguments.frameNumber;
+    if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
+      throw new Error('Invalid frame number');
+    }
+    return this.exec_state_.frame(frame_index);
+  } else {
+    return this.exec_state_.frame();
+  }
+}
+
+
+DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
+  // No frames, no scopes.
+  if (this.exec_state_.frameCount() == 0) {
+    return response.failed('No scopes');
+  }
+
+  // Get the frame for which the scopes are requested.
+  var frame = this.frameForScopeRequest_(request);
+
+  // Fill all scopes for this frame.
+  var total_scopes = frame.scopeCount();
+  var scopes = [];
+  for (var i = 0; i < total_scopes; i++) {
+    scopes.push(frame.scope(i));
+  }
+  response.body = {
+    fromScope: 0,
+    toScope: total_scopes,
+    totalScopes: total_scopes,
+    scopes: scopes
+  }
+};
+
+
+DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
+  // No frames, no scopes.
+  if (this.exec_state_.frameCount() == 0) {
+    return response.failed('No scopes');
+  }
+
+  // Get the frame for which the scope is requested.
+  var frame = this.frameForScopeRequest_(request);
+
+  // With no scope argument just return top scope.
+  var scope_index = 0;
+  if (request.arguments && !IS_UNDEFINED(request.arguments.number)) {
+    scope_index = %ToNumber(request.arguments.number);
+    if (scope_index < 0 || frame.scopeCount() <= scope_index) {
+      return response.failed('Invalid scope number');
+    }
+  }
+
+  response.body = frame.scope(scope_index);
+};
+
+
+DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
+  if (!request.arguments) {
+    return response.failed('Missing arguments');
+  }
+
+  // Pull out arguments.
+  var expression = request.arguments.expression;
+  var frame = request.arguments.frame;
+  var global = request.arguments.global;
+  var disable_break = request.arguments.disable_break;
+
+  // The expression argument could be an integer so we convert it to a
+  // string.
+  try {
+    expression = String(expression);
+  } catch(e) {
+    return response.failed('Failed to convert expression argument to string');
+  }
+
+  // Check for legal arguments.
+  if (!IS_UNDEFINED(frame) && global) {
+    return response.failed('Arguments "frame" and "global" are exclusive');
+  }
+
+  // Global evaluate.
+  if (global) {
+    // Evaluate in the global context.
+    response.body =
+        this.exec_state_.evaluateGlobal(expression, Boolean(disable_break));
+    return;
+  }
+
+  // Default value for disable_break is true.
+  if (IS_UNDEFINED(disable_break)) {
+    disable_break = true;
+  }
+
+  // No frames, nothing to evaluate in a frame.
+  if (this.exec_state_.frameCount() == 0) {
+    return response.failed('No frames');
+  }
+
+  // Check whether a frame was specified.
+  if (!IS_UNDEFINED(frame)) {
+    var frame_number = %ToNumber(frame);
+    if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
+      return response.failed('Invalid frame "' + frame + '"');
+    }
+    // Evaluate in the specified frame.
+    response.body = this.exec_state_.frame(frame_number).evaluate(
+        expression, Boolean(disable_break));
+    return;
+  } else {
+    // Evaluate in the selected frame.
+    response.body = this.exec_state_.frame().evaluate(
+        expression, Boolean(disable_break));
+    return;
+  }
+};
+
+
+DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) {
+  if (!request.arguments) {
+    return response.failed('Missing arguments');
+  }
+
+  // Pull out arguments.
+  var handles = request.arguments.handles;
+
+  // Check for legal arguments.
+  if (IS_UNDEFINED(handles)) {
+    return response.failed('Argument "handles" missing');
+  }
+
+  // Set 'includeSource' option for script lookup.
+  if (!IS_UNDEFINED(request.arguments.includeSource)) {
+    var includeSource = %ToBoolean(request.arguments.includeSource);
+    response.setOption('includeSource', includeSource);
+  }
+
+  // Lookup handles.
+  var mirrors = {};
+  for (var i = 0; i < handles.length; i++) {
+    var handle = handles[i];
+    var mirror = LookupMirror(handle);
+    if (!mirror) {
+      return response.failed('Object #' + handle + '# not found');
+    }
+    mirrors[handle] = mirror;
+  }
+  response.body = mirrors;
+};
+
+
+DebugCommandProcessor.prototype.referencesRequest_ =
+    function(request, response) {
+  if (!request.arguments) {
+    return response.failed('Missing arguments');
+  }
+
+  // Pull out arguments.
+  var type = request.arguments.type;
+  var handle = request.arguments.handle;
+
+  // Check for legal arguments.
+  if (IS_UNDEFINED(type)) {
+    return response.failed('Argument "type" missing');
+  }
+  if (IS_UNDEFINED(handle)) {
+    return response.failed('Argument "handle" missing');
+  }
+  if (type != 'referencedBy' && type != 'constructedBy') {
+    return response.failed('Invalid type "' + type + '"');
+  }
+
+  // Look up the handle and return the objects that reference or are
+  // constructed by it.
+  var mirror = LookupMirror(handle);
+  if (mirror) {
+    if (type == 'referencedBy') {
+      response.body = mirror.referencedBy();
+    } else {
+      response.body = mirror.constructedBy();
+    }
+  } else {
+    return response.failed('Object #' + handle + '# not found');
+  }
+};
+
+
+DebugCommandProcessor.prototype.sourceRequest_ = function(request, response) {
+  // No frames, no source.
+  if (this.exec_state_.frameCount() == 0) {
+    return response.failed('No source');
+  }
+
+  var from_line;
+  var to_line;
+  var frame = this.exec_state_.frame();
+  if (request.arguments) {
+    // Pull out arguments.
+    from_line = request.arguments.fromLine;
+    to_line = request.arguments.toLine;
+
+    if (!IS_UNDEFINED(request.arguments.frame)) {
+      var frame_number = %ToNumber(request.arguments.frame);
+      if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
+        return response.failed('Invalid frame "' + frame + '"');
+      }
+      frame = this.exec_state_.frame(frame_number);
+    }
+  }
+
+  // Get the script selected.
+  var script = frame.func().script();
+  if (!script) {
+    return response.failed('No source');
+  }
+
+  // Get the source slice and fill it into the response.
+  var slice = script.sourceSlice(from_line, to_line);
+  if (!slice) {
+    return response.failed('Invalid line interval');
+  }
+  response.body = {};
+  response.body.source = slice.sourceText();
+  response.body.fromLine = slice.from_line;
+  response.body.toLine = slice.to_line;
+  response.body.fromPosition = slice.from_position;
+  response.body.toPosition = slice.to_position;
+  response.body.totalLines = script.lineCount();
+};
+
+
+DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
+  var types = ScriptTypeFlag(Debug.ScriptType.Normal);
+  var includeSource = false;
+  var idsToInclude = null;
+  if (request.arguments) {
+    // Pull out arguments.
+    if (!IS_UNDEFINED(request.arguments.types)) {
+      types = %ToNumber(request.arguments.types);
+      if (isNaN(types) || types < 0) {
+        return response.failed('Invalid types "' + request.arguments.types + '"');
+      }
+    }
+    
+    if (!IS_UNDEFINED(request.arguments.includeSource)) {
+      includeSource = %ToBoolean(request.arguments.includeSource);
+      response.setOption('includeSource', includeSource);
+    }
+    
+    if (IS_ARRAY(request.arguments.ids)) {
+      idsToInclude = {};
+      var ids = request.arguments.ids;
+      for (var i = 0; i < ids.length; i++) {
+        idsToInclude[ids[i]] = true;
+      }
+    }
+  }
+
+  // Collect all scripts in the heap.
+  var scripts = %DebugGetLoadedScripts();
+
+  response.body = [];
+
+  for (var i = 0; i < scripts.length; i++) {
+    if (idsToInclude && !idsToInclude[scripts[i].id]) {
+      continue;
+    }
+    if (types & ScriptTypeFlag(scripts[i].type)) {
+      response.body.push(MakeMirror(scripts[i]));
+    }
+  }
+};
+
+
+DebugCommandProcessor.prototype.threadsRequest_ = function(request, response) {
+  // Get the number of threads.
+  var total_threads = this.exec_state_.threadCount();
+
+  // Get information for all threads.
+  var threads = [];
+  for (var i = 0; i < total_threads; i++) {
+    var details = %GetThreadDetails(this.exec_state_.break_id, i);
+    var thread_info = { current: details[0],
+                        id: details[1]
+                      }
+    threads.push(thread_info);
+  }
+
+  // Create the response body.
+  response.body = {
+    totalThreads: total_threads,
+    threads: threads
+  }
+};
+
+
+DebugCommandProcessor.prototype.suspendRequest_ = function(request, response) {
+  response.running = false;
+};
+
+
+DebugCommandProcessor.prototype.versionRequest_ = function(request, response) {
+  response.body = {
+    V8Version: %GetV8Version()
+  }
+};
+
+
+DebugCommandProcessor.prototype.profileRequest_ = function(request, response) {
+  if (!request.arguments) {
+    return response.failed('Missing arguments');
+  }
+  var modules = parseInt(request.arguments.modules);
+  if (isNaN(modules)) {
+    return response.failed('Modules is not an integer');
+  }
+  if (request.arguments.command == 'resume') {
+    %ProfilerResume(modules);
+  } else if (request.arguments.command == 'pause') {
+    %ProfilerPause(modules);
+  } else {
+    return response.failed('Unknown command');
+  }
+  response.body = {};
+};
+
+
+// Check whether the previously processed command caused the VM to become
+// running.
+DebugCommandProcessor.prototype.isRunning = function() {
+  return this.running_;
+}
+
+
+DebugCommandProcessor.prototype.systemBreak = function(cmd, args) {
+  return %SystemBreak();
+};
+
+
+function NumberToHex8Str(n) {
+  var r = "";
+  for (var i = 0; i < 8; ++i) {
+    var c = hexCharArray[n & 0x0F];  // hexCharArray is defined in uri.js
+    r = c + r;
+    n = n >>> 4;
+  }
+  return r;
+};
+
+DebugCommandProcessor.prototype.formatCFrames = function(cframes_value) {
+  var result = "";
+  if (cframes_value == null || cframes_value.length == 0) {
+    result += "(stack empty)";
+  } else {
+    for (var i = 0; i < cframes_value.length; ++i) {
+      if (i != 0) result += "\n";
+      result += this.formatCFrame(cframes_value[i]);
+    }
+  }
+  return result;
+};
+
+
+DebugCommandProcessor.prototype.formatCFrame = function(cframe_value) {
+  var result = "";
+  result += "0x" + NumberToHex8Str(cframe_value.address);
+  if (!IS_UNDEFINED(cframe_value.text)) {
+    result += " " + cframe_value.text;
+  }
+  return result;
+}
+
+
+/**
+ * Convert an Object to its debugger protocol representation. The representation
+ * may be serialized to JSON using JSON.stringify().
+ * This implementation simply runs through all string property names, converts
+ * each property value to a protocol value and adds the property to the result
+ * object. For type "object" the function will be called recursively. Note that
+ * circular structures will cause infinite recursion.
+ * @param {Object} object The object to format as protocol object.
+ * @param {MirrorSerializer} mirror_serializer The serializer to use if any
+ *     mirror objects are encountered.
+ * @return {Object} Protocol object value.
+ */
+function ObjectToProtocolObject_(object, mirror_serializer) {
+  var content = {};
+  for (var key in object) {
+    // Only consider string keys.
+    if (typeof key == 'string') {
+      // Format the value based on its type.
+      var property_value_json = ValueToProtocolValue_(object[key],
+                                                      mirror_serializer);
+      // Add the property if relevant.
+      if (!IS_UNDEFINED(property_value_json)) {
+        content[key] = property_value_json;
+      }
+    }
+  }
+  
+  return content;
+}
+
+
+/**
+ * Convert an array to its debugger protocol representation. It will convert
+ * each array element to a protocol value.
+ * @param {Array} array The array to format as protocol array.
+ * @param {MirrorSerializer} mirror_serializer The serializer to use if any
+ *     mirror objects are encountered.
+ * @return {Array} Protocol array value.
+ */
+function ArrayToProtocolArray_(array, mirror_serializer) {
+  var json = [];
+  for (var i = 0; i < array.length; i++) {
+    json.push(ValueToProtocolValue_(array[i], mirror_serializer));
+  }
+  return json;
+}
+
+
+/**
+ * Convert a value to its debugger protocol representation. 
+ * @param {*} value The value to format as protocol value.
+ * @param {MirrorSerializer} mirror_serializer The serializer to use if any
+ *     mirror objects are encountered.
+ * @return {*} Protocol value.
+ */
+function ValueToProtocolValue_(value, mirror_serializer) {
+  // Format the value based on its type.
+  var json;
+  switch (typeof value) {
+    case 'object':
+      if (value instanceof Mirror) {
+        json = mirror_serializer.serializeValue(value);
+      } else if (IS_ARRAY(value)){
+        json = ArrayToProtocolArray_(value, mirror_serializer);
+      } else {
+        json = ObjectToProtocolObject_(value, mirror_serializer);
+      }
+      break;
+
+    case 'boolean':
+    case 'string':
+    case 'number':
+      json = value;
+      break
+
+    default:
+      json = null;
+  }
+  return json;
+}
diff --git a/src/debug.cc b/src/debug.cc
index fb9b23e..68f8d1e 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -690,7 +690,13 @@
   bool allow_natives_syntax = FLAG_allow_natives_syntax;
   FLAG_allow_natives_syntax = true;
   Handle<JSFunction> boilerplate;
-  boilerplate = Compiler::Compile(source_code, script_name, 0, 0, NULL, NULL);
+  boilerplate = Compiler::Compile(source_code,
+                                  script_name,
+                                  0,
+                                  0,
+                                  NULL,
+                                  NULL,
+                                  NATIVES_CODE);
   FLAG_allow_natives_syntax = allow_natives_syntax;
 
   // Silently ignore stack overflows during compilation.
@@ -720,7 +726,6 @@
 
   // Mark this script as native and return successfully.
   Handle<Script> script(Script::cast(function->shared()->script()));
-  script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
   return true;
 }
 
@@ -1674,7 +1679,7 @@
 
   // Perform two GCs to get rid of all unreferenced scripts. The first GC gets
   // rid of all the cached script wrappers and the second gets rid of the
-  // scripts which is no longer referenced.
+  // scripts which are no longer referenced.
   Heap::CollectAllGarbage(false);
   Heap::CollectAllGarbage(false);
 
@@ -1987,7 +1992,7 @@
   // If debugging there might be script break points registered for this
   // script. Make sure that these break points are set.
 
-  // Get the function UpdateScriptBreakPoints (defined in debug-delay.js).
+  // Get the function UpdateScriptBreakPoints (defined in debug-debugger.js).
   Handle<Object> update_script_break_points =
       Handle<Object>(Debug::debug_context()->global()->GetProperty(
           *Factory::LookupAsciiSymbol("UpdateScriptBreakPoints")));
diff --git a/src/disassembler.cc b/src/disassembler.cc
index 50f3eb9..8473cd9 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -261,7 +261,7 @@
             ASSERT(code->major_key() == CodeStub::MajorKeyFromKey(key));
             out.AddFormatted(" %s, %s, ",
                              Code::Kind2String(kind),
-                             CodeStub::MajorName(code->major_key()));
+                             CodeStub::MajorName(code->major_key(), false));
             switch (code->major_key()) {
               case CodeStub::CallFunction:
                 out.AddFormatted("argc = %d", minor_key);
diff --git a/src/fast-codegen.cc b/src/fast-codegen.cc
index 4e6f259..36d7297 100644
--- a/src/fast-codegen.cc
+++ b/src/fast-codegen.cc
@@ -51,8 +51,7 @@
   } while (false)
 
 
-void FastCodeGenSyntaxChecker::Check(FunctionLiteral* fun,
-                                     CompilationInfo* info) {
+void FastCodeGenSyntaxChecker::Check(CompilationInfo* info) {
   info_ = info;
 
   // We do not specialize if we do not have a receiver or if it is not a
@@ -64,7 +63,7 @@
 
   // We do not support stack or heap slots (both of which require
   // allocation).
-  Scope* scope = fun->scope();
+  Scope* scope = info->scope();
   if (scope->num_stack_slots() > 0) {
     BAILOUT("Function has stack-allocated locals");
   }
@@ -76,8 +75,10 @@
   CHECK_BAILOUT;
 
   // We do not support empty function bodies.
-  if (fun->body()->is_empty()) BAILOUT("Function has an empty body");
-  VisitStatements(fun->body());
+  if (info->function()->body()->is_empty()) {
+    BAILOUT("Function has an empty body");
+  }
+  VisitStatements(info->function()->body());
 }
 
 
@@ -213,7 +214,16 @@
 void FastCodeGenSyntaxChecker::VisitVariableProxy(VariableProxy* expr) {
   // Only global variable references are supported.
   Variable* var = expr->var();
-  if (!var->is_global()) BAILOUT("Non-global variable");
+  if (!var->is_global() || var->is_this()) BAILOUT("Non-global variable");
+
+  // Check that the global variable exists and is non-deletable.
+  if (info()->has_global_object()) {
+    LookupResult lookup;
+    info()->global_object()->Lookup(*expr->name(), &lookup);
+    if (!lookup.IsValid() || !lookup.IsDontDelete()) {
+      BAILOUT("Non-existing or deletable global variable");
+    }
+  }
 }
 
 
@@ -332,24 +342,20 @@
 
 #define __ ACCESS_MASM(masm())
 
-Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
-                                         Handle<Script> script,
-                                         bool is_eval,
-                                         CompilationInfo* info) {
+Handle<Code> FastCodeGenerator::MakeCode(CompilationInfo* info) {
   // Label the AST before calling MakeCodePrologue, so AST node numbers are
   // printed with the AST.
   AstLabeler labeler;
-  labeler.Label(fun);
-  info->set_has_this_properties(labeler.has_this_properties());
+  labeler.Label(info);
 
-  CodeGenerator::MakeCodePrologue(fun);
+  CodeGenerator::MakeCodePrologue(info);
 
   const int kInitialBufferSize = 4 * KB;
   MacroAssembler masm(NULL, kInitialBufferSize);
 
   // Generate the fast-path code.
-  FastCodeGenerator fast_cgen(&masm, script, is_eval);
-  fast_cgen.Generate(fun, info);
+  FastCodeGenerator fast_cgen(&masm);
+  fast_cgen.Generate(info);
   if (fast_cgen.HasStackOverflow()) {
     ASSERT(!Top::has_pending_exception());
     return Handle<Code>::null();
@@ -357,16 +363,16 @@
 
   // Generate the full code for the function in bailout mode, using the same
   // macro assembler.
-  CodeGenerator cgen(&masm, script, is_eval);
+  CodeGenerator cgen(&masm);
   CodeGeneratorScope scope(&cgen);
-  cgen.Generate(fun, CodeGenerator::SECONDARY, info);
+  cgen.Generate(info, CodeGenerator::SECONDARY);
   if (cgen.HasStackOverflow()) {
     ASSERT(!Top::has_pending_exception());
     return Handle<Code>::null();
   }
 
   Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
-  return CodeGenerator::MakeCodeEpilogue(fun, &masm, flags, script);
+  return CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
 }
 
 
@@ -488,7 +494,16 @@
     SmartPointer<char> name = expr->name()->ToCString();
     PrintF("%d: t%d = Global(%s)\n", expr->num(), expr->num(), *name);
   }
-  EmitGlobalVariableLoad(expr->name());
+
+  // Check if we can compile a global variable load directly from the cell.
+  ASSERT(info()->has_global_object());
+  LookupResult lookup;
+  info()->global_object()->Lookup(*expr->name(), &lookup);
+  // We only support DontDelete properties for now.
+  ASSERT(lookup.IsValid());
+  ASSERT(lookup.IsDontDelete());
+  Handle<Object> cell(info()->global_object()->GetPropertyCell(&lookup));
+  EmitGlobalVariableLoad(cell);
 }
 
 
diff --git a/src/fast-codegen.h b/src/fast-codegen.h
index b40f6fb..cbcb5bf 100644
--- a/src/fast-codegen.h
+++ b/src/fast-codegen.h
@@ -42,7 +42,7 @@
       : info_(NULL), has_supported_syntax_(true) {
   }
 
-  void Check(FunctionLiteral* fun, CompilationInfo* info);
+  void Check(CompilationInfo* info);
 
   CompilationInfo* info() { return info_; }
   bool has_supported_syntax() { return has_supported_syntax_; }
@@ -65,29 +65,19 @@
 
 class FastCodeGenerator: public AstVisitor {
  public:
-  FastCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval)
-      : masm_(masm),
-        script_(script),
-        is_eval_(is_eval),
-        function_(NULL),
-        info_(NULL) {
-  }
+  explicit FastCodeGenerator(MacroAssembler* masm) : masm_(masm), info_(NULL) {}
 
-  static Handle<Code> MakeCode(FunctionLiteral* fun,
-                               Handle<Script> script,
-                               bool is_eval,
-                               CompilationInfo* info);
+  static Handle<Code> MakeCode(CompilationInfo* info);
 
-  void Generate(FunctionLiteral* fun, CompilationInfo* info);
+  void Generate(CompilationInfo* compilation_info);
 
  private:
   MacroAssembler* masm() { return masm_; }
-  FunctionLiteral* function() { return function_; }
+  CompilationInfo* info() { return info_; }
   Label* bailout() { return &bailout_; }
 
-  bool has_receiver() { return !info_->receiver().is_null(); }
-  Handle<Object> receiver() { return info_->receiver(); }
-  bool has_this_properties() { return info_->has_this_properties(); }
+  FunctionLiteral* function() { return info_->function(); }
+  Scope* scope() { return info_->scope(); }
 
   // AST node visit functions.
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
@@ -102,11 +92,13 @@
   // arm-r1}.  Emit a branch to the (single) bailout label if check fails.
   void EmitReceiverMapCheck();
 
-  // Emit code to load a global variable value into {is32-eax, x64-rax,
-  // arm-r0}.  Register {ia32-edx, x64-rdx, arm-r1} is preserved if it is
-  // holding the receiver and {is32-ecx, x64-rcx, arm-r2} is always
-  // clobbered.
-  void EmitGlobalVariableLoad(Handle<String> name);
+  // Emit code to check that the global object has the same map as the
+  // global object seen at compile time.
+  void EmitGlobalMapCheck();
+
+  // Emit code to load a global variable directly from a global
+  // property cell into {ia32-eax, x64-rax, arm-r0}.
+  void EmitGlobalVariableLoad(Handle<Object> cell);
 
   // Emit a store to an own property of this.  The stored value is expected
   // in {ia32-eax, x64-rax, arm-r0} and the receiver in {is32-edx, x64-rdx,
@@ -114,10 +106,7 @@
   void EmitThisPropertyStore(Handle<String> name);
 
   MacroAssembler* masm_;
-  Handle<Script> script_;
-  bool is_eval_;
 
-  FunctionLiteral* function_;
   CompilationInfo* info_;
 
   Label bailout_;
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index b57f2cb..9afdea4 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -116,11 +116,12 @@
             "enable use of SAHF instruction if available (X64 only)")
 DEFINE_bool(enable_vfp3, true,
             "enable use of VFP3 instructions if available (ARM only)")
+DEFINE_bool(enable_armv7, true,
+            "enable use of ARMv7 instructions if available (ARM only)")
 
 // bootstrapper.cc
 DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
 DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
-DEFINE_string(natives_file, NULL, "alternative natives file")
 DEFINE_bool(expose_gc, false, "expose gc extension")
 DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture")
 
@@ -218,7 +219,7 @@
 // rewriter.cc
 DEFINE_bool(optimize_ast, true, "optimize the ast")
 
-// simulator-arm.cc
+// simulator-arm.cc and simulator-mips.cc
 DEFINE_bool(trace_sim, false, "trace simulator execution")
 DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
 
@@ -383,7 +384,7 @@
 DEFINE_bool(log_regexp, false, "Log regular expression execution.")
 DEFINE_bool(sliding_state_window, false,
             "Update sliding state window counters.")
-DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
+DEFINE_string(logfile, "/sdcard/v8.log", "Specify the name of the log file.")
 DEFINE_bool(oprofile, false, "Enable JIT agent for OProfile.")
 
 //
diff --git a/src/frames-inl.h b/src/frames-inl.h
index c5f2f1a..7221851 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -36,6 +36,8 @@
 #include "x64/frames-x64.h"
 #elif V8_TARGET_ARCH_ARM
 #include "arm/frames-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/frames-mips.h"
 #else
 #error Unsupported target architecture.
 #endif
diff --git a/src/frames.cc b/src/frames.cc
index e56a2c8..0550740 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -355,6 +355,7 @@
 
 void StackFrame::Cook() {
   Code* code = this->code();
+  ASSERT(code->IsCode());
   for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
     it.handler()->Cook(code);
   }
@@ -365,6 +366,7 @@
 
 void StackFrame::Uncook() {
   Code* code = this->code();
+  ASSERT(code->IsCode());
   for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
     it.handler()->Uncook(code);
   }
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 01714cb..22510e9 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -439,24 +439,23 @@
 
 #define __ ACCESS_MASM(masm())
 
-Handle<Code> FullCodeGenerator::MakeCode(FunctionLiteral* fun,
-                                         Handle<Script> script,
-                                         bool is_eval) {
+Handle<Code> FullCodeGenerator::MakeCode(CompilationInfo* info) {
+  Handle<Script> script = info->script();
   if (!script->IsUndefined() && !script->source()->IsUndefined()) {
     int len = String::cast(script->source())->length();
     Counters::total_full_codegen_source_size.Increment(len);
   }
-  CodeGenerator::MakeCodePrologue(fun);
+  CodeGenerator::MakeCodePrologue(info);
   const int kInitialBufferSize = 4 * KB;
   MacroAssembler masm(NULL, kInitialBufferSize);
-  FullCodeGenerator cgen(&masm, script, is_eval);
-  cgen.Generate(fun, PRIMARY);
+  FullCodeGenerator cgen(&masm);
+  cgen.Generate(info, PRIMARY);
   if (cgen.HasStackOverflow()) {
     ASSERT(!Top::has_pending_exception());
     return Handle<Code>::null();
   }
   Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
-  return CodeGenerator::MakeCodeEpilogue(fun, &masm, flags, script);
+  return CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
 }
 
 
@@ -467,7 +466,7 @@
   // Adjust by a (parameter or local) base offset.
   switch (slot->type()) {
     case Slot::PARAMETER:
-      offset += (function_->scope()->num_parameters() + 1) * kPointerSize;
+      offset += (scope()->num_parameters() + 1) * kPointerSize;
       break;
     case Slot::LOCAL:
       offset += JavaScriptFrameConstants::kLocal0Offset;
@@ -520,7 +519,7 @@
           }
         } else {
           Handle<JSFunction> function =
-              Compiler::BuildBoilerplate(decl->fun(), script_, this);
+              Compiler::BuildBoilerplate(decl->fun(), script(), this);
           // Check for stack-overflow exception.
           if (HasStackOverflow()) return;
           array->set(j++, *function);
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 6688ff7..96d0f3e 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -68,11 +68,9 @@
     SECONDARY
   };
 
-  FullCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval)
+  explicit FullCodeGenerator(MacroAssembler* masm)
       : masm_(masm),
-        script_(script),
-        is_eval_(is_eval),
-        function_(NULL),
+        info_(NULL),
         nesting_stack_(NULL),
         loop_depth_(0),
         location_(kStack),
@@ -80,11 +78,9 @@
         false_label_(NULL) {
   }
 
-  static Handle<Code> MakeCode(FunctionLiteral* fun,
-                               Handle<Script> script,
-                               bool is_eval);
+  static Handle<Code> MakeCode(CompilationInfo* info);
 
-  void Generate(FunctionLiteral* fun, Mode mode);
+  void Generate(CompilationInfo* info, Mode mode);
 
  private:
   class Breakable;
@@ -408,6 +404,12 @@
   }
 
   MacroAssembler* masm() { return masm_; }
+
+  Handle<Script> script() { return info_->script(); }
+  bool is_eval() { return info_->is_eval(); }
+  FunctionLiteral* function() { return info_->function(); }
+  Scope* scope() { return info_->scope(); }
+
   static Register result_register();
   static Register context_register();
 
@@ -427,10 +429,7 @@
   void EmitLogicalOperation(BinaryOperation* expr);
 
   MacroAssembler* masm_;
-  Handle<Script> script_;
-  bool is_eval_;
-
-  FunctionLiteral* function_;
+  CompilationInfo* info_;
 
   Label return_label_;
   NestedStatement* nesting_stack_;
diff --git a/src/globals.h b/src/globals.h
index 39f6bcb..68d0bdc 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -46,6 +46,9 @@
 #elif defined(__ARMEL__)
 #define V8_HOST_ARCH_ARM 1
 #define V8_HOST_ARCH_32_BIT 1
+#elif defined(_MIPS_ARCH_MIPS32R2)
+#define V8_HOST_ARCH_MIPS 1
+#define V8_HOST_ARCH_32_BIT 1
 #else
 #error Your host architecture was not detected as supported by v8
 #endif
@@ -53,6 +56,7 @@
 #if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
 #define V8_TARGET_CAN_READ_UNALIGNED 1
 #elif V8_TARGET_ARCH_ARM
+#elif V8_TARGET_ARCH_MIPS
 #else
 #error Your target architecture is not supported by v8
 #endif
@@ -310,6 +314,10 @@
 
 enum VisitMode { VISIT_ALL, VISIT_ALL_IN_SCAVENGE, VISIT_ONLY_STRONG };
 
+// Flag indicating whether code is built in to the VM (one of the natives
+// files).
+enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };
+
 
 // A CodeDesc describes a buffer holding instructions and relocation
 // information. The instructions start at the beginning of the buffer
@@ -389,7 +397,7 @@
 // Type of properties.
 // Order of properties is significant.
 // Must fit in the BitField PropertyDetails::TypeField.
-// A copy of this is in mirror-delay.js.
+// A copy of this is in mirror-debugger.js.
 enum PropertyType {
   NORMAL              = 0,  // only in slow mode
   FIELD               = 1,  // only in fast mode
@@ -608,6 +616,7 @@
                   RDTSC = 4,   // x86
                   CPUID = 10,  // x86
                   VFP3 = 1,    // ARM
+                  ARMv7 = 2,   // ARM
                   SAHF = 0};   // x86
 
 } }  // namespace v8::internal
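
The new ARMv7 value joins the existing CpuFeature constants, which are consumed through CpuFeatures::IsEnabled elsewhere in this patch. A hedged sketch of the usual guard pattern (the two emit helpers are hypothetical placeholders, not part of this change):

    // Hypothetical helpers, for illustration only; only the IsEnabled(ARMv7)
    // guard itself is taken from the enum above.
    void EmitLoadInt32(MacroAssembler* masm, int value) {
      if (CpuFeatures::IsEnabled(ARMv7)) {
        EmitMovwMovtPair(masm, value);       // encodings available only on ARMv7
      } else {
        EmitConstantPoolLoad(masm, value);   // fallback that works on older cores
      }
    }
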
diff --git a/src/handles.cc b/src/handles.cc
index c66056e..05f561e 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -243,6 +243,15 @@
 }
 
 
+Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
+                                     Handle<String> key,
+                                     Handle<Object> value,
+                                     PropertyDetails details) {
+  CALL_HEAP_FUNCTION(object->SetNormalizedProperty(*key, *value, details),
+                     Object);
+}
+
+
 Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
                                    Handle<Object> key) {
   CALL_HEAP_FUNCTION(Runtime::ForceDeleteObjectProperty(object, key), Object);
@@ -686,7 +695,7 @@
 
 bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
                        ClearExceptionFlag flag) {
-  CompilationInfo info(shared, Handle<Object>::null(), 0);
+  CompilationInfo info(shared);
   return CompileLazyHelper(&info, flag);
 }
 
@@ -694,8 +703,7 @@
 bool CompileLazy(Handle<JSFunction> function,
                  Handle<Object> receiver,
                  ClearExceptionFlag flag) {
-  Handle<SharedFunctionInfo> shared(function->shared());
-  CompilationInfo info(shared, receiver, 0);
+  CompilationInfo info(function, 0, receiver);
   bool result = CompileLazyHelper(&info, flag);
   LOG(FunctionCreateEvent(*function));
   return result;
@@ -705,8 +713,7 @@
 bool CompileLazyInLoop(Handle<JSFunction> function,
                        Handle<Object> receiver,
                        ClearExceptionFlag flag) {
-  Handle<SharedFunctionInfo> shared(function->shared());
-  CompilationInfo info(shared, receiver, 1);
+  CompilationInfo info(function, 1, receiver);
   bool result = CompileLazyHelper(&info, flag);
   LOG(FunctionCreateEvent(*function));
   return result;
@@ -747,87 +754,4 @@
   }
 }
 
-
-void LoadLazy(Handle<JSObject> obj, bool* pending_exception) {
-  HandleScope scope;
-  Handle<FixedArray> info(FixedArray::cast(obj->map()->constructor()));
-  int index = Smi::cast(info->get(0))->value();
-  ASSERT(index >= 0);
-  Handle<Context> compile_context(Context::cast(info->get(1)));
-  Handle<Context> function_context(Context::cast(info->get(2)));
-  Handle<Object> receiver(compile_context->global()->builtins());
-
-  Vector<const char> name = Natives::GetScriptName(index);
-
-  Handle<JSFunction> boilerplate;
-
-  if (!Bootstrapper::NativesCacheLookup(name, &boilerplate)) {
-    Handle<String> source_code = Bootstrapper::NativesSourceLookup(index);
-    Handle<String> script_name = Factory::NewStringFromAscii(name);
-    bool allow_natives_syntax = FLAG_allow_natives_syntax;
-    FLAG_allow_natives_syntax = true;
-    boilerplate = Compiler::Compile(source_code, script_name, 0, 0, NULL, NULL);
-    FLAG_allow_natives_syntax = allow_natives_syntax;
-    // If the compilation failed (possibly due to stack overflows), we
-    // should never enter the result in the natives cache. Instead we
-    // return from the function without marking the function as having
-    // been lazily loaded.
-    if (boilerplate.is_null()) {
-      *pending_exception = true;
-      return;
-    }
-    Bootstrapper::NativesCacheAdd(name, boilerplate);
-  }
-
-  // We shouldn't get here if compiling the script failed.
-  ASSERT(!boilerplate.is_null());
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  // When the debugger running in its own context touches lazy loaded
-  // functions loading can be triggered. In that case ensure that the
-  // execution of the boilerplate is in the correct context.
-  SaveContext save;
-  if (!Debug::debug_context().is_null() &&
-      Top::context() == *Debug::debug_context()) {
-    Top::set_context(*compile_context);
-  }
-#endif
-
-  // Reset the lazy load data before running the script to make sure
-  // not to get recursive lazy loading.
-  obj->map()->set_needs_loading(false);
-  obj->map()->set_constructor(info->get(3));
-
-  // Run the script.
-  Handle<JSFunction> script_fun(
-      Factory::NewFunctionFromBoilerplate(boilerplate, function_context));
-  Execution::Call(script_fun, receiver, 0, NULL, pending_exception);
-
-  // If lazy loading failed, restore the unloaded state of obj.
-  if (*pending_exception) {
-    obj->map()->set_needs_loading(true);
-    obj->map()->set_constructor(*info);
-  }
-}
-
-
-void SetupLazy(Handle<JSObject> obj,
-               int index,
-               Handle<Context> compile_context,
-               Handle<Context> function_context) {
-  Handle<FixedArray> arr = Factory::NewFixedArray(4);
-  arr->set(0, Smi::FromInt(index));
-  arr->set(1, *compile_context);  // Compile in this context
-  arr->set(2, *function_context);  // Set function context to this
-  arr->set(3, obj->map()->constructor());  // Remember the constructor
-  Handle<Map> old_map(obj->map());
-  Handle<Map> new_map = Factory::CopyMapDropTransitions(old_map);
-  obj->set_map(*new_map);
-  new_map->set_needs_loading(true);
-  // Store the lazy loading info in the constructor field.  We'll
-  // reestablish the constructor from the fixed array after loading.
-  new_map->set_constructor(*arr);
-  ASSERT(!obj->IsLoaded());
-}
-
 } }  // namespace v8::internal
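
The new SetNormalizedProperty wrapper follows the pattern of the other helpers in this file: run the raw-object operation under CALL_HEAP_FUNCTION (which deals with allocation failure and retries after GC) and hand the caller a handle. A hedged usage sketch, with all operand names assumed:

    // Usage sketch; 'object', 'key' and 'value' are assumed handles, and the
    // object is expected to already be in normalized (dictionary) mode.
    // NONE/NORMAL are the existing property-attribute/property-type enums.
    PropertyDetails details(NONE, NORMAL);
    Handle<Object> result = SetNormalizedProperty(object, key, value, details);
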
diff --git a/src/handles.h b/src/handles.h
index 04f087b..caa9966 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -210,6 +210,11 @@
                                 Handle<Object> value,
                                 PropertyAttributes attributes);
 
+Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
+                                     Handle<String> key,
+                                     Handle<Object> value,
+                                     PropertyDetails details);
+
 Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
                                    Handle<Object> key);
 
@@ -330,13 +335,6 @@
 // Returns the lazy compilation stub for argc arguments.
 Handle<Code> ComputeLazyCompile(int argc);
 
-// These deal with lazily loaded properties.
-void SetupLazy(Handle<JSObject> obj,
-               int index,
-               Handle<Context> compile_context,
-               Handle<Context> function_context);
-void LoadLazy(Handle<JSObject> obj, bool* pending_exception);
-
 class NoHandleAllocation BASE_EMBEDDED {
  public:
 #ifndef DEBUG
diff --git a/src/heap.h b/src/heap.h
index cbf0b73..7e1a743 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -106,6 +106,7 @@
   V(FixedArray, single_character_string_cache, SingleCharacterStringCache)     \
   V(FixedArray, natives_source_cache, NativesSourceCache)                      \
   V(Object, last_script_id, LastScriptId)                                      \
+  V(Script, empty_script, EmptyScript)                                         \
   V(Smi, real_stack_limit, RealStackLimit)                                     \
 
 #if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
@@ -734,6 +735,10 @@
     roots_[kNonMonomorphicCacheRootIndex] = value;
   }
 
+  static void public_set_empty_script(Script* script) {
+    roots_[kEmptyScriptRootIndex] = script;
+  }
+
   // Update the next script id.
   static inline void SetLastScriptId(Object* last_script_id);
 
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index dc017ae..ffcefe0 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -267,7 +267,7 @@
 }
 
 // -----------------------------------------------------------------------------
-// Implementation of Assembler
+// Implementation of Assembler.
 
 // Emit a single byte. Must always be inlined.
 #define EMIT(x)                                 \
@@ -278,12 +278,12 @@
 static void InitCoverageLog();
 #endif
 
-// spare_buffer_
+// Spare buffer.
 byte* Assembler::spare_buffer_ = NULL;
 
 Assembler::Assembler(void* buffer, int buffer_size) {
   if (buffer == NULL) {
-    // do our own buffer management
+    // Do our own buffer management.
     if (buffer_size <= kMinimalBufferSize) {
       buffer_size = kMinimalBufferSize;
 
@@ -300,7 +300,7 @@
     buffer_size_ = buffer_size;
     own_buffer_ = true;
   } else {
-    // use externally provided buffer instead
+    // Use externally provided buffer instead.
     ASSERT(buffer_size > 0);
     buffer_ = static_cast<byte*>(buffer);
     buffer_size_ = buffer_size;
@@ -316,7 +316,7 @@
   }
 #endif
 
-  // setup buffer pointers
+  // Setup buffer pointers.
   ASSERT(buffer_ != NULL);
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -344,11 +344,10 @@
 
 
 void Assembler::GetCode(CodeDesc* desc) {
-  // finalize code
-  // (at this point overflow() may be true, but the gap ensures that
-  // we are still not overlapping instructions and relocation info)
-  ASSERT(pc_ <= reloc_info_writer.pos());  // no overlap
-  // setup desc
+  // Finalize code (at this point overflow() may be true, but the gap ensures
+  // that we are still not overlapping instructions and relocation info).
+  ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap.
+  // Setup code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
   desc->instr_size = pc_offset();
@@ -435,7 +434,7 @@
 void Assembler::pop(Register dst) {
   ASSERT(reloc_info_writer.last_pc() != NULL);
   if (FLAG_push_pop_elimination && (reloc_info_writer.last_pc() <= last_pc_)) {
-    // (last_pc_ != NULL) is rolled into the above check
+    // (last_pc_ != NULL) is rolled into the above check.
     // If a last_pc_ is set, we need to make sure that there has not been any
     // relocation information generated between the last instruction and this
     // pop instruction.
@@ -461,7 +460,7 @@
       return;
     } else if (instr == 0xff) {  // push of an operand, convert to a move
       byte op1 = last_pc_[1];
-      // Check if the operation is really a push
+      // Check if the operation is really a push.
       if ((op1 & 0x38) == (6 << 3)) {
         op1 = (op1 & ~0x38) | static_cast<byte>(dst.code() << 3);
         last_pc_[0] = 0x8b;
@@ -747,7 +746,7 @@
   ASSERT(CpuFeatures::IsEnabled(CMOV));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  // Opcode: 0f 40 + cc /r
+  // Opcode: 0f 40 + cc /r.
   EMIT(0x0F);
   EMIT(0x40 + cc);
   emit_operand(dst, src);
@@ -765,7 +764,7 @@
 void Assembler::xchg(Register dst, Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  if (src.is(eax) || dst.is(eax)) {  // Single-byte encoding
+  if (src.is(eax) || dst.is(eax)) {  // Single-byte encoding.
     EMIT(0x90 | (src.is(eax) ? dst.code() : src.code()));
   } else {
     EMIT(0x87);
@@ -1434,7 +1433,7 @@
       if (disp.type() == Displacement::UNCONDITIONAL_JUMP) {
         ASSERT(byte_at(fixup_pos - 1) == 0xE9);  // jmp expected
       }
-      // relative address, relative to point after address
+      // Relative address, relative to point after address.
       int imm32 = pos - (fixup_pos + sizeof(int32_t));
       long_at_put(fixup_pos, imm32);
     }
@@ -1449,7 +1448,7 @@
   last_pc_ = NULL;
   if (appendix->is_linked()) {
     if (L->is_linked()) {
-      // append appendix to L's list
+      // Append appendix to L's list.
       Label p;
       Label q = *L;
       do {
@@ -1462,7 +1461,7 @@
       disp_at_put(&p, disp);
       p.Unuse();  // to avoid assertion failure in ~Label
     } else {
-      // L is empty, simply use appendix
+      // L is empty, simply use appendix.
       *L = *appendix;
     }
   }
@@ -1485,11 +1484,11 @@
     const int long_size = 5;
     int offs = L->pos() - pc_offset();
     ASSERT(offs <= 0);
-    // 1110 1000 #32-bit disp
+    // 1110 1000 #32-bit disp.
     EMIT(0xE8);
     emit(offs - long_size);
   } else {
-    // 1110 1000 #32-bit disp
+    // 1110 1000 #32-bit disp.
     EMIT(0xE8);
     emit_disp(L, Displacement::OTHER);
   }
@@ -1532,16 +1531,16 @@
     int offs = L->pos() - pc_offset();
     ASSERT(offs <= 0);
     if (is_int8(offs - short_size)) {
-      // 1110 1011 #8-bit disp
+      // 1110 1011 #8-bit disp.
       EMIT(0xEB);
       EMIT((offs - short_size) & 0xFF);
     } else {
-      // 1110 1001 #32-bit disp
+      // 1110 1001 #32-bit disp.
       EMIT(0xE9);
       emit(offs - long_size);
     }
   } else {
-    // 1110 1001 #32-bit disp
+    // 1110 1001 #32-bit disp.
     EMIT(0xE9);
     emit_disp(L, Displacement::UNCONDITIONAL_JUMP);
   }
@@ -1611,7 +1610,7 @@
   last_pc_ = pc_;
   ASSERT((0 <= cc) && (cc < 16));
   if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
-  // 0000 1111 1000 tttn #32-bit disp
+  // 0000 1111 1000 tttn #32-bit disp.
   EMIT(0x0F);
   EMIT(0x80 | cc);
   emit(entry - (pc_ + sizeof(int32_t)), rmode);
@@ -1629,7 +1628,7 @@
 }
 
 
-// FPU instructions
+// FPU instructions.
 
 void Assembler::fld(int i) {
   EnsureSpace ensure_space(this);
@@ -2225,10 +2224,10 @@
 
 
 void Assembler::GrowBuffer() {
-  ASSERT(overflow());  // should not call this otherwise
+  ASSERT(overflow());
   if (!own_buffer_) FATAL("external code buffer is too small");
 
-  // compute new buffer size
+  // Compute new buffer size.
   CodeDesc desc;  // the new buffer
   if (buffer_size_ < 4*KB) {
     desc.buffer_size = 4*KB;
@@ -2242,7 +2241,7 @@
     V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
   }
 
-  // setup new buffer
+  // Setup new buffer.
   desc.buffer = NewArray<byte>(desc.buffer_size);
   desc.instr_size = pc_offset();
   desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
@@ -2253,14 +2252,14 @@
   memset(desc.buffer, 0xCC, desc.buffer_size);
 #endif
 
-  // copy the data
+  // Copy the data.
   int pc_delta = desc.buffer - buffer_;
   int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
   memmove(desc.buffer, buffer_, desc.instr_size);
   memmove(rc_delta + reloc_info_writer.pos(),
           reloc_info_writer.pos(), desc.reloc_size);
 
-  // switch buffers
+  // Switch buffers.
   if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
     spare_buffer_ = buffer_;
   } else {
@@ -2275,7 +2274,7 @@
   reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                                reloc_info_writer.last_pc() + pc_delta);
 
-  // relocate runtime entries
+  // Relocate runtime entries.
   for (RelocIterator it(desc); !it.done(); it.next()) {
     RelocInfo::Mode rmode = it.rinfo()->rmode();
     if (rmode == RelocInfo::RUNTIME_ENTRY) {
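
GetCode and GrowBuffer above rely on the assembler's single buffer being filled from both ends: instructions grow up from buffer_ while relocation info is written backwards from buffer_ + buffer_size_. A standalone illustration of the size computations the code above performs:

    // Plain pointers stand in for the assembler fields; the real assembler
    // also keeps a small safety gap between the two regions.
    struct CodeBufferLayout {
      unsigned char* buffer;     // start of the allocation
      int buffer_size;           // total bytes available
      unsigned char* pc;         // next instruction byte (grows upward)
      unsigned char* reloc_pos;  // next relocation byte (grows downward)

      int instr_size() const { return static_cast<int>(pc - buffer); }
      int reloc_size() const {
        return static_cast<int>(buffer + buffer_size - reloc_pos);
      }
      bool overflowed() const { return pc > reloc_pos; }  // time to GrowBuffer()
    };
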
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 9ce0734..f0cf4f1 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -77,7 +77,7 @@
     return 1 << code_;
   }
 
-  // (unfortunately we can't make this private in a struct)
+  // Unfortunately we can't make this private in a struct.
   int code_;
 };
 
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index fe91903..d0fbabb 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -103,14 +103,10 @@
 // -------------------------------------------------------------------------
 // CodeGenerator implementation
 
-CodeGenerator::CodeGenerator(MacroAssembler* masm,
-                             Handle<Script> script,
-                             bool is_eval)
-    : is_eval_(is_eval),
-      script_(script),
-      deferred_(8),
+CodeGenerator::CodeGenerator(MacroAssembler* masm)
+    : deferred_(8),
       masm_(masm),
-      scope_(NULL),
+      info_(NULL),
       frame_(NULL),
       allocator_(NULL),
       state_(NULL),
@@ -120,23 +116,21 @@
 }
 
 
+Scope* CodeGenerator::scope() { return info_->function()->scope(); }
+
+
 // Calling conventions:
 // ebp: caller's frame pointer
 // esp: stack pointer
 // edi: called JS function
 // esi: callee's context
 
-void CodeGenerator::Generate(FunctionLiteral* fun,
-                             Mode mode,
-                             CompilationInfo* info) {
+void CodeGenerator::Generate(CompilationInfo* info, Mode mode) {
   // Record the position for debugging purposes.
-  CodeForFunctionPosition(fun);
-
-  ZoneList<Statement*>* body = fun->body();
+  CodeForFunctionPosition(info->function());
 
   // Initialize state.
-  ASSERT(scope_ == NULL);
-  scope_ = fun->scope();
+  info_ = info;
   ASSERT(allocator_ == NULL);
   RegisterAllocator register_allocator(this);
   allocator_ = &register_allocator;
@@ -151,7 +145,7 @@
 
 #ifdef DEBUG
   if (strlen(FLAG_stop_at) > 0 &&
-      fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+      info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
     frame_->SpillAll();
     __ int3();
   }
@@ -177,7 +171,7 @@
       frame_->AllocateStackSlots();
 
       // Allocate the local context if needed.
-      int heap_slots = scope_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+      int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
       if (heap_slots > 0) {
         Comment cmnt(masm_, "[ allocate local context");
         // Allocate local context.
@@ -207,7 +201,6 @@
       // 3) don't copy parameter operand code from SlotOperand!
       {
         Comment cmnt2(masm_, "[ copy context parameters into .context");
-
         // Note that iteration order is relevant here! If we have the same
         // parameter twice (e.g., function (x, y, x)), and that parameter
         // needs to be copied into the context, it must be the last argument
@@ -216,15 +209,15 @@
         // order: such a parameter is copied repeatedly into the same
         // context location and thus the last value is what is seen inside
         // the function.
-        for (int i = 0; i < scope_->num_parameters(); i++) {
-          Variable* par = scope_->parameter(i);
+        for (int i = 0; i < scope()->num_parameters(); i++) {
+          Variable* par = scope()->parameter(i);
           Slot* slot = par->slot();
           if (slot != NULL && slot->type() == Slot::CONTEXT) {
             // The use of SlotOperand below is safe in unspilled code
             // because the slot is guaranteed to be a context slot.
             //
             // There are no parameters in the global scope.
-            ASSERT(!scope_->is_global_scope());
+            ASSERT(!scope()->is_global_scope());
             frame_->PushParameterAt(i);
             Result value = frame_->Pop();
             value.ToRegister();
@@ -252,9 +245,9 @@
       }
 
       // Initialize ThisFunction reference if present.
-      if (scope_->is_function_scope() && scope_->function() != NULL) {
+      if (scope()->is_function_scope() && scope()->function() != NULL) {
         frame_->Push(Factory::the_hole_value());
-        StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+        StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
       }
     } else {
       // When used as the secondary compiler for splitting, ebp, esi,
@@ -272,12 +265,12 @@
     // Generate code to 'execute' declarations and initialize functions
     // (source elements). In case of an illegal redeclaration we need to
     // handle that instead of processing the declarations.
-    if (scope_->HasIllegalRedeclaration()) {
+    if (scope()->HasIllegalRedeclaration()) {
       Comment cmnt(masm_, "[ illegal redeclarations");
-      scope_->VisitIllegalRedeclaration(this);
+      scope()->VisitIllegalRedeclaration(this);
     } else {
       Comment cmnt(masm_, "[ declarations");
-      ProcessDeclarations(scope_->declarations());
+      ProcessDeclarations(scope()->declarations());
       // Bail out if a stack-overflow exception occurred when processing
       // declarations.
       if (HasStackOverflow()) return;
@@ -292,7 +285,7 @@
     // Compile the body of the function in a vanilla state. Don't
     // bother compiling all the code if the scope has an illegal
     // redeclaration.
-    if (!scope_->HasIllegalRedeclaration()) {
+    if (!scope()->HasIllegalRedeclaration()) {
       Comment cmnt(masm_, "[ function body");
 #ifdef DEBUG
       bool is_builtin = Bootstrapper::IsActive();
@@ -303,14 +296,14 @@
         // Ignore the return value.
       }
 #endif
-      VisitStatements(body);
+      VisitStatements(info->function()->body());
 
       // Handle the return from the function.
       if (has_valid_frame()) {
         // If there is a valid frame, control flow can fall off the end of
         // the body.  In that case there is an implicit return statement.
         ASSERT(!function_return_is_shadowed_);
-        CodeForReturnPosition(fun);
+        CodeForReturnPosition(info->function());
         frame_->PrepareForReturn();
         Result undefined(Factory::undefined_value());
         if (function_return_.is_bound()) {
@@ -353,7 +346,6 @@
   // There is no need to delete the register allocator, it is a
   // stack-allocated local.
   allocator_ = NULL;
-  scope_ = NULL;
 }
 
 
@@ -590,13 +582,13 @@
 }
 
 
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() const {
-  if (scope_->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
-  ASSERT(scope_->arguments_shadow() != NULL);
+ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
+  if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
+  ASSERT(scope()->arguments_shadow() != NULL);
   // We don't want to do lazy arguments allocation for functions that
   // have heap-allocated contexts, because it interfers with the
   // uninitialized const tracking in the context objects.
-  return (scope_->num_heap_slots() > 0)
+  return (scope()->num_heap_slots() > 0)
       ? EAGER_ARGUMENTS_ALLOCATION
       : LAZY_ARGUMENTS_ALLOCATION;
 }
@@ -616,13 +608,13 @@
     ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
     frame_->PushFunction();
     frame_->PushReceiverSlotAddress();
-    frame_->Push(Smi::FromInt(scope_->num_parameters()));
+    frame_->Push(Smi::FromInt(scope()->num_parameters()));
     Result result = frame_->CallStub(&stub, 3);
     frame_->Push(&result);
   }
 
-  Variable* arguments = scope_->arguments()->var();
-  Variable* shadow = scope_->arguments_shadow()->var();
+  Variable* arguments = scope()->arguments()->var();
+  Variable* shadow = scope()->arguments_shadow()->var();
   ASSERT(arguments != NULL && arguments->slot() != NULL);
   ASSERT(shadow != NULL && shadow->slot() != NULL);
   JumpTarget done;
@@ -2344,7 +2336,7 @@
   // Load the receiver and the existing arguments object onto the
   // expression stack. Avoid allocating the arguments object here.
   Load(receiver);
-  LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+  LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
 
   // Emit the source position information after having loaded the
   // receiver and the arguments.
@@ -2424,8 +2416,8 @@
       __ j(equal, &adapted);
 
       // No arguments adaptor frame. Copy fixed number of arguments.
-      __ mov(eax, Immediate(scope_->num_parameters()));
-      for (int i = 0; i < scope_->num_parameters(); i++) {
+      __ mov(eax, Immediate(scope()->num_parameters()));
+      for (int i = 0; i < scope()->num_parameters(); i++) {
         __ push(frame_->ParameterAt(i));
       }
       __ jmp(&invoke);
@@ -2831,7 +2823,7 @@
   // Leave the frame and return popping the arguments and the
   // receiver.
   frame_->Exit();
-  masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
+  masm_->ret((scope()->num_parameters() + 1) * kPointerSize);
   DeleteFrame();
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -3952,7 +3944,7 @@
 
   // Build the function boilerplate and instantiate it.
   Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(node, script_, this);
+      Compiler::BuildBoilerplate(node, script(), this);
   // Check for stack-overflow exception.
   if (HasStackOverflow()) return;
   InstantiateBoilerplate(boilerplate);
@@ -5277,7 +5269,7 @@
   ASSERT(args->length() == 0);
   // ArgumentsAccessStub takes the parameter count as an input argument
   // in register eax.  Create a constant result for it.
-  Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
+  Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
   // Call the shared stub to get to the arguments.length.
   ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
   Result result = frame_->CallStub(&stub, &count);
@@ -5424,7 +5416,7 @@
   Load(args->at(0));
   Result key = frame_->Pop();
   // Explicitly create a constant result.
-  Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
+  Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
   // Call the shared stub to get to arguments[key].
   ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
   Result result = frame_->CallStub(&stub, &key, &count);
@@ -9967,7 +9959,8 @@
          Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   // Load string argument and locate character of sub string start.
   __ mov(esi, Operand(esp, 3 * kPointerSize));
-  __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(Operand(esi),
+         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   __ mov(ebx, Operand(esp, 2 * kPointerSize));  // from
   // As from is a smi it is 2 times the value which matches the size of a two
   // byte character.
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 956f424..843bbf7 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -305,19 +305,15 @@
 
   // Takes a function literal, generates code for it. This function should only
   // be called by compiler.cc.
-  static Handle<Code> MakeCode(FunctionLiteral* fun,
-                               Handle<Script> script,
-                               bool is_eval,
-                               CompilationInfo* info);
+  static Handle<Code> MakeCode(CompilationInfo* info);
 
   // Printing of AST, etc. as requested by flags.
-  static void MakeCodePrologue(FunctionLiteral* fun);
+  static void MakeCodePrologue(CompilationInfo* info);
 
   // Allocate and install the code.
-  static Handle<Code> MakeCodeEpilogue(FunctionLiteral* fun,
-                                       MacroAssembler* masm,
+  static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
                                        Code::Flags flags,
-                                       Handle<Script> script);
+                                       CompilationInfo* info);
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
@@ -328,7 +324,7 @@
   // Accessors
   MacroAssembler* masm() { return masm_; }
   VirtualFrame* frame() const { return frame_; }
-  Handle<Script> script() { return script_; }
+  inline Handle<Script> script();
 
   bool has_valid_frame() const { return frame_ != NULL; }
 
@@ -352,11 +348,11 @@
 
  private:
   // Construction/Destruction
-  CodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval);
+  explicit CodeGenerator(MacroAssembler* masm);
 
   // Accessors
-  Scope* scope() const { return scope_; }
-  bool is_eval() { return is_eval_; }
+  inline bool is_eval();
+  Scope* scope();
 
   // Generating deferred code.
   void ProcessDeferred();
@@ -388,7 +384,7 @@
   void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
 
   // Main code generation function
-  void Generate(FunctionLiteral* fun, Mode mode, CompilationInfo* info);
+  void Generate(CompilationInfo* info, Mode mode);
 
   // Generate the return sequence code.  Should be called no more than
   // once per compiled function, immediately after binding the return
@@ -396,7 +392,7 @@
   void GenerateReturnSequence(Result* return_value);
 
   // Returns the arguments allocation mode.
-  ArgumentsAllocationMode ArgumentsMode() const;
+  ArgumentsAllocationMode ArgumentsMode();
 
   // Store the arguments object and allocate it if necessary.
   Result StoreArgumentsObject(bool initial);
@@ -607,15 +603,14 @@
   bool HasValidEntryRegisters();
 #endif
 
-  bool is_eval_;  // Tells whether code is generated for eval.
-  Handle<Script> script_;
   ZoneList<DeferredCode*> deferred_;
 
   // Assembler
   MacroAssembler* masm_;  // to generate code
 
+  CompilationInfo* info_;
+
   // Code generation state
-  Scope* scope_;
   VirtualFrame* frame_;
   RegisterAllocator* allocator_;
   CodeGenState* state_;
@@ -750,13 +745,6 @@
 };
 
 
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
-  NO_STRING_ADD_FLAGS = 0,
-  NO_STRING_CHECK_IN_STUB = 1 << 0  // Omit string check in stub.
-};
-
-
 class StringStubBase: public CodeStub {
  public:
   // Generate code for copying characters using a simple loop. This should only
@@ -782,6 +770,13 @@
 };
 
 
+// Flag that indicates how to generate code for the stub StringAddStub.
+enum StringAddFlags {
+  NO_STRING_ADD_FLAGS = 0,
+  NO_STRING_CHECK_IN_STUB = 1 << 0  // Omit string check in stub.
+};
+
+
 class StringAddStub: public StringStubBase {
  public:
   explicit StringAddStub(StringAddFlags flags) {
diff --git a/src/ia32/fast-codegen-ia32.cc b/src/ia32/fast-codegen-ia32.cc
index 2a15733..126f96b 100644
--- a/src/ia32/fast-codegen-ia32.cc
+++ b/src/ia32/fast-codegen-ia32.cc
@@ -48,42 +48,47 @@
     PrintF("MapCheck(this)\n");
   }
 
-  EmitLoadReceiver(edx);
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, bailout());
-
-  ASSERT(has_receiver() && receiver()->IsHeapObject());
-  Handle<HeapObject> object = Handle<HeapObject>::cast(receiver());
+  ASSERT(info()->has_receiver() && info()->receiver()->IsHeapObject());
+  Handle<HeapObject> object = Handle<HeapObject>::cast(info()->receiver());
   Handle<Map> map(object->map());
-  __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Immediate(map));
-  __ j(not_equal, bailout());
+
+  EmitLoadReceiver(edx);
+  __ CheckMap(edx, map, bailout(), false);
 }
 
 
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
-  // Compile global variable accesses as load IC calls.  The only live
-  // registers are esi (context) and possibly edx (this).  Both are also
-  // saved in the stack and esi is preserved by the call.
-  __ push(CodeGenerator::GlobalObject());
-  __ mov(ecx, name);
-  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-  __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-  if (has_this_properties()) {
-    // Restore this.
-    EmitLoadReceiver(edx);
-  } else {
-    __ nop();  // Not test eax, indicates IC has no inlined code at call site.
+void FastCodeGenerator::EmitGlobalMapCheck() {
+  Comment cmnt(masm(), ";; GlobalMapCheck");
+  if (FLAG_print_ir) {
+    PrintF(";; GlobalMapCheck()");
+  }
+
+  ASSERT(info()->has_global_object());
+  Handle<Map> map(info()->global_object()->map());
+
+  __ mov(ebx, CodeGenerator::GlobalObject());
+  __ CheckMap(ebx, map, bailout(), true);
+}
+
+
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> cell) {
+  ASSERT(cell->IsJSGlobalPropertyCell());
+  __ mov(eax, Immediate(cell));
+  __ mov(eax, FieldOperand(eax, JSGlobalPropertyCell::kValueOffset));
+  if (FLAG_debug_code) {
+    __ cmp(eax, Factory::the_hole_value());
+    __ Check(not_equal, "DontDelete cells can't contain the hole");
   }
 }
 
 
 void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
   LookupResult lookup;
-  receiver()->Lookup(*name, &lookup);
+  info()->receiver()->Lookup(*name, &lookup);
 
-  ASSERT(lookup.holder() == *receiver());
+  ASSERT(lookup.holder() == *info()->receiver());
   ASSERT(lookup.type() == FIELD);
-  Handle<Map> map(Handle<HeapObject>::cast(receiver())->map());
+  Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
   int index = lookup.GetFieldIndex() - map->inobject_properties();
   int offset = index * kPointerSize;
 
@@ -103,11 +108,9 @@
 }
 
 
-void FastCodeGenerator::Generate(FunctionLiteral* fun, CompilationInfo* info) {
-  ASSERT(function_ == NULL);
+void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
   ASSERT(info_ == NULL);
-  function_ = fun;
-  info_ = info;
+  info_ = compilation_info;
 
   // Save the caller's frame pointer and set up our own.
   Comment prologue_cmnt(masm(), ";; Prologue");
@@ -119,9 +122,13 @@
   // point.
 
   // Receiver (this) is allocated to edx if there are this properties.
-  if (has_this_properties()) EmitReceiverMapCheck();
+  if (info()->has_this_properties()) EmitReceiverMapCheck();
 
-  VisitStatements(fun->body());
+  // If there is a global variable access, check if the global object
+  // is the same as at lazy-compilation time.
+  if (info()->has_globals()) EmitGlobalMapCheck();
+
+  VisitStatements(function()->body());
 
   Comment return_cmnt(masm(), ";; Return(<undefined>)");
   __ mov(eax, Factory::undefined_value());
@@ -129,7 +136,7 @@
   Comment epilogue_cmnt(masm(), ";; Epilogue");
   __ mov(esp, ebp);
   __ pop(ebp);
-  __ ret((fun->scope()->num_parameters() + 1) * kPointerSize);
+  __ ret((scope()->num_parameters() + 1) * kPointerSize);
 
   __ bind(&bailout_);
 }
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 9f9ac56..3163b19 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -51,9 +51,10 @@
 //
 // The function builds a JS frame.  Please see JavaScriptFrameConstants in
 // frames-ia32.h for its layout.
-void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
-  function_ = fun;
-  SetFunctionPosition(fun);
+void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+  ASSERT(info_ == NULL);
+  info_ = info;
+  SetFunctionPosition(function());
 
   if (mode == PRIMARY) {
     __ push(ebp);  // Caller's frame pointer.
@@ -62,7 +63,7 @@
     __ push(edi);  // Callee's JS Function.
 
     { Comment cmnt(masm_, "[ Allocate locals");
-      int locals_count = fun->scope()->num_stack_slots();
+      int locals_count = scope()->num_stack_slots();
       if (locals_count == 1) {
         __ push(Immediate(Factory::undefined_value()));
       } else if (locals_count > 1) {
@@ -76,7 +77,7 @@
     bool function_in_register = true;
 
     // Possibly allocate a local context.
-    if (fun->scope()->num_heap_slots() > 0) {
+    if (scope()->num_heap_slots() > 0) {
       Comment cmnt(masm_, "[ Allocate local context");
       // Argument to NewContext is the function, which is still in edi.
       __ push(edi);
@@ -87,9 +88,9 @@
       __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
 
       // Copy parameters into context if necessary.
-      int num_parameters = fun->scope()->num_parameters();
+      int num_parameters = scope()->num_parameters();
       for (int i = 0; i < num_parameters; i++) {
-        Slot* slot = fun->scope()->parameter(i)->slot();
+        Slot* slot = scope()->parameter(i)->slot();
         if (slot != NULL && slot->type() == Slot::CONTEXT) {
           int parameter_offset = StandardFrameConstants::kCallerSPOffset +
                                      (num_parameters - 1 - i) * kPointerSize;
@@ -107,7 +108,7 @@
       }
     }
 
-    Variable* arguments = fun->scope()->arguments()->AsVariable();
+    Variable* arguments = scope()->arguments()->AsVariable();
     if (arguments != NULL) {
       // Function uses arguments object.
       Comment cmnt(masm_, "[ Allocate arguments object");
@@ -117,10 +118,11 @@
         __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
       }
       // Receiver is just before the parameters on the caller's stack.
-      __ lea(edx, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
-                          fun->num_parameters() * kPointerSize));
+      int offset = scope()->num_parameters() * kPointerSize;
+      __ lea(edx,
+             Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
       __ push(edx);
-      __ push(Immediate(Smi::FromInt(fun->num_parameters())));
+      __ push(Immediate(Smi::FromInt(scope()->num_parameters())));
       // Arguments to ArgumentsAccessStub:
       //   function, receiver address, parameter count.
       // The stub will rewrite receiver and parameter count if the previous
@@ -130,13 +132,13 @@
       __ mov(ecx, eax);  // Duplicate result.
       Move(arguments->slot(), eax, ebx, edx);
       Slot* dot_arguments_slot =
-          fun->scope()->arguments_shadow()->AsVariable()->slot();
+          scope()->arguments_shadow()->AsVariable()->slot();
       Move(dot_arguments_slot, ecx, ebx, edx);
     }
   }
 
   { Comment cmnt(masm_, "[ Declarations");
-    VisitDeclarations(fun->scope()->declarations());
+    VisitDeclarations(scope()->declarations());
   }
 
   { Comment cmnt(masm_, "[ Stack check");
@@ -156,14 +158,14 @@
 
   { Comment cmnt(masm_, "[ Body");
     ASSERT(loop_depth() == 0);
-    VisitStatements(fun->body());
+    VisitStatements(function()->body());
     ASSERT(loop_depth() == 0);
   }
 
   { Comment cmnt(masm_, "[ return <undefined>;");
     // Emit a 'return undefined' in case control fell off the end of the body.
     __ mov(eax, Factory::undefined_value());
-    EmitReturnSequence(function_->end_position());
+    EmitReturnSequence(function()->end_position());
   }
 }
 
@@ -190,7 +192,7 @@
     // patch with the code required by the debugger.
     __ mov(esp, ebp);
     __ pop(ebp);
-    __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
+    __ ret((scope()->num_parameters() + 1) * kPointerSize);
 #ifdef ENABLE_DEBUGGER_SUPPORT
     // Check that the size of the code used for returning matches what is
     // expected by the debugger.
@@ -627,7 +629,7 @@
       return Operand(ebp, SlotOffset(slot));
     case Slot::CONTEXT: {
       int context_chain_length =
-          function_->scope()->ContextChainLength(slot->var()->scope());
+          scope()->ContextChainLength(slot->var()->scope());
       __ LoadContext(scratch, context_chain_length);
       return CodeGenerator::ContextOperand(scratch, slot->index());
     }
@@ -686,7 +688,7 @@
         // this specific context.
 
         // The variable in the decl always resides in the current context.
-        ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+        ASSERT_EQ(0, scope()->ContextChainLength(var->scope()));
         if (FLAG_debug_code) {
           // Check if we have the correct context pointer.
           __ mov(ebx,
@@ -764,7 +766,7 @@
   // Call the runtime to declare the globals.
   __ push(esi);  // The context is the first argument.
   __ push(Immediate(pairs));
-  __ push(Immediate(Smi::FromInt(is_eval_ ? 1 : 0)));
+  __ push(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
   __ CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
 }
@@ -775,7 +777,7 @@
 
   // Build the function boilerplate and instantiate it.
   Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(expr, script_, this);
+      Compiler::BuildBoilerplate(expr, script(), this);
   if (HasStackOverflow()) return;
 
   ASSERT(boilerplate->IsBoilerplate());
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 44dae3b..d1ae28b 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -151,23 +151,6 @@
 }
 
 
-// Helper function used to check that a value is either not an object
-// or is loaded if it is an object.
-static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
-                                           Register value, Register scratch) {
-  Label done;
-  // Check if the value is a Smi.
-  __ test(value, Immediate(kSmiTagMask));
-  __ j(zero, &done, not_taken);
-  // Check if the object has been loaded.
-  __ mov(scratch, FieldOperand(value, JSFunction::kMapOffset));
-  __ mov(scratch, FieldOperand(scratch, Map::kBitField2Offset));
-  __ test(scratch, Immediate(1 << Map::kNeedsLoading));
-  __ j(not_zero, miss, not_taken);
-  __ bind(&done);
-}
-
-
 // The offset from the inlined patch site to the start of the
 // inlined load instruction.  It is 7 bytes (test eax, imm) plus
 // 6 bytes (jne slow_label).
@@ -369,7 +352,6 @@
                          edx,
                          eax,
                          DICTIONARY_CHECK_DONE);
-  GenerateCheckNonObjectOrLoaded(masm, &slow, ecx, edx);
   __ mov(eax, Operand(ecx));
   __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
   __ ret(0);
@@ -1011,11 +993,6 @@
   __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
   __ j(not_equal, miss, not_taken);
 
-  // Check that the function has been loaded.  eax holds function's map.
-  __ mov(eax, FieldOperand(eax, Map::kBitField2Offset));
-  __ test(eax, Immediate(1 << Map::kNeedsLoading));
-  __ j(not_zero, miss, not_taken);
-
   // Patch the receiver on stack with the global proxy if necessary.
   if (is_global_object) {
     __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
@@ -1203,7 +1180,6 @@
   // Search the dictionary placing the result in eax.
   __ bind(&probe);
   GenerateDictionaryLoad(masm, &miss, edx, eax, ebx, ecx, CHECK_DICTIONARY);
-  GenerateCheckNonObjectOrLoaded(masm, &miss, eax, edx);
   __ ret(0);
 
   // Global object access: Check access rights.
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 19a380b..4dd6a9b 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -338,6 +338,19 @@
 }
 
 
+void MacroAssembler::CheckMap(Register obj,
+                              Handle<Map> map,
+                              Label* fail,
+                              bool is_heap_object) {
+  if (!is_heap_object) {
+    test(obj, Immediate(kSmiTagMask));
+    j(zero, fail);
+  }
+  cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
+  j(not_equal, fail);
+}
+
+
 Condition MacroAssembler::IsObjectStringType(Register heap_object,
                                              Register map,
                                              Register instance_type) {
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index cc24560..0ddbd5d 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -141,6 +141,14 @@
   // Compare instance type for map.
   void CmpInstanceType(Register map, InstanceType type);
 
+  // Check if the map of an object is equal to a specified map and
+  // branch to label if not. Skip the smi check if not required
+  // (object is known to be a heap object)
+  void CheckMap(Register obj,
+                Handle<Map> map,
+                Label* fail,
+                bool is_heap_object);
+
   // Check if the object in register heap_object is a string. Afterwards the
   // register map contains the object map and the register instance_type
   // contains the instance_type. The registers map and instance_type can be the
@@ -430,8 +438,8 @@
   List<Unresolved> unresolved_;
   bool generating_stub_;
   bool allow_stub_calls_;
-  Handle<Object> code_object_;  // This handle will be patched with the
-                                // code object on installation.
+  // This handle will be patched with the code object on installation.
+  Handle<Object> code_object_;
 
   // Helper functions for generating invokes.
   void InvokePrologue(const ParameterCount& expected,
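
CheckMap is already exercised earlier in this patch by FastCodeGenerator::EmitReceiverMapCheck and EmitGlobalMapCheck; a minimal usage sketch in the same __-macro style (register and label choices are illustrative, and 'known_object' is an assumed Handle<HeapObject>):

    Label miss;
    Handle<Map> expected(known_object->map());
    // Last argument false: the value in eax might be a smi, so emit the smi
    // check before comparing the map; pass true to skip it for known heap objects.
    __ CheckMap(eax, expected, &miss, false);
    // ... fast path for objects with the expected map ...
    __ bind(&miss);
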
diff --git a/src/ic.cc b/src/ic.cc
index 8fc9ddb..27a1841 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -432,7 +432,7 @@
   }
 
   // Lookup is valid: Update inline cache and stub cache.
-  if (FLAG_use_ic && lookup.IsLoaded()) {
+  if (FLAG_use_ic) {
     UpdateCaches(&lookup, state, object, name);
   }
 
@@ -491,7 +491,6 @@
                           State state,
                           Handle<Object> object,
                           Handle<String> name) {
-  ASSERT(lookup->IsLoaded());
   // Bail out if we didn't find a result.
   if (!lookup->IsValid() || !lookup->IsCacheable()) return;
 
@@ -654,7 +653,6 @@
       FLAG_use_ic &&
       state == PREMONOMORPHIC &&
       lookup.IsValid() &&
-      lookup.IsLoaded() &&
       lookup.IsCacheable() &&
       lookup.holder() == *object &&
       lookup.type() == FIELD &&
@@ -676,7 +674,7 @@
   }
 
   // Update inline cache and stub cache.
-  if (FLAG_use_ic && lookup.IsLoaded()) {
+  if (FLAG_use_ic) {
     UpdateCaches(&lookup, state, object, name);
   }
 
@@ -702,7 +700,6 @@
                           State state,
                           Handle<Object> object,
                           Handle<String> name) {
-  ASSERT(lookup->IsLoaded());
   // Bail out if we didn't find a result.
   if (!lookup->IsValid() || !lookup->IsCacheable()) return;
 
@@ -864,7 +861,7 @@
       }
     }
 
-    if (FLAG_use_ic && lookup.IsLoaded()) {
+    if (FLAG_use_ic) {
       UpdateCaches(&lookup, state, object, name);
     }
 
@@ -917,7 +914,6 @@
 
 void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
                                Handle<Object> object, Handle<String> name) {
-  ASSERT(lookup->IsLoaded());
   // Bail out if we didn't find a result.
   if (!lookup->IsValid() || !lookup->IsCacheable()) return;
 
@@ -998,8 +994,6 @@
   // state.
   if (lookup->IsReadOnly()) return false;
 
-  if (!lookup->IsLoaded()) return false;
-
   return true;
 }
 
@@ -1064,7 +1058,6 @@
                            Handle<JSObject> receiver,
                            Handle<String> name,
                            Handle<Object> value) {
-  ASSERT(lookup->IsLoaded());
   // Skip JSGlobalProxy.
   ASSERT(!receiver->IsJSGlobalProxy());
 
@@ -1172,7 +1165,7 @@
     receiver->LocalLookup(*name, &lookup);
 
     // Update inline cache and stub cache.
-    if (FLAG_use_ic && lookup.IsLoaded()) {
+    if (FLAG_use_ic) {
       UpdateCaches(&lookup, state, receiver, name, value);
     }
 
@@ -1206,8 +1199,6 @@
                                 Handle<JSObject> receiver,
                                 Handle<String> name,
                                 Handle<Object> value) {
-  ASSERT(lookup->IsLoaded());
-
   // Skip JSGlobalProxy.
   if (receiver->IsJSGlobalProxy()) return;
 
diff --git a/src/json.js b/src/json.js
new file mode 100644
index 0000000..3e42d36
--- /dev/null
+++ b/src/json.js
@@ -0,0 +1,268 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var $JSON = global.JSON;
+
+function ParseJSONUnfiltered(text) {
+  var s = $String(text);
+  var f = %CompileString(text, true);
+  return f();
+}
+
+function Revive(holder, name, reviver) {
+  var val = holder[name];
+  if (IS_OBJECT(val)) {
+    if (IS_ARRAY(val)) {
+      var length = val.length;
+      for (var i = 0; i < length; i++) {
+        var newElement = Revive(val, $String(i), reviver);
+        val[i] = newElement;
+      }
+    } else {
+      for (var p in val) {
+        if (ObjectHasOwnProperty.call(val, p)) {
+          var newElement = Revive(val, p, reviver);
+          if (IS_UNDEFINED(newElement)) {
+            delete val[p];
+          } else {
+            val[p] = newElement;
+          }
+        }
+      }
+    }
+  }
+  return reviver.call(holder, name, val);
+}
+
+function JSONParse(text, reviver) {
+  var unfiltered = ParseJSONUnfiltered(text);
+  if (IS_FUNCTION(reviver)) {
+    return Revive({'': unfiltered}, '', reviver);
+  } else {
+    return unfiltered;
+  }
+}
+
+var characterQuoteCache = {
+  '\"': '\\"',
+  '\\': '\\\\',
+  '/': '\\/',
+  '\b': '\\b',
+  '\f': '\\f',
+  '\n': '\\n',
+  '\r': '\\r',
+  '\t': '\\t',
+  '\x0B': '\\u000b'
+};
+
+function QuoteSingleJSONCharacter(c) {
+  if (c in characterQuoteCache) {
+    return characterQuoteCache[c];
+  }
+  var charCode = c.charCodeAt(0);
+  var result;
+  if (charCode < 16) result = '\\u000';
+  else if (charCode < 256) result = '\\u00';
+  else if (charCode < 4096) result = '\\u0';
+  else result = '\\u';
+  result += charCode.toString(16);
+  characterQuoteCache[c] = result;
+  return result;
+}
+
+function QuoteJSONString(str) {
+  var quotable = /[\\\"\x00-\x1f\x80-\uffff]/g;
+  return '"' + str.replace(quotable, QuoteSingleJSONCharacter) + '"';
+}
+
+function StackContains(stack, val) {
+  var length = stack.length;
+  for (var i = 0; i < length; i++) {
+    if (stack[i] === val) {
+      return true;
+    }
+  }
+  return false;
+}
+
+function SerializeArray(value, replacer, stack, indent, gap) {
+  if (StackContains(stack, value)) {
+    throw MakeTypeError('circular_structure', []);
+  }
+  stack.push(value);
+  var stepback = indent;
+  indent += gap;
+  var partial = [];
+  var len = value.length;
+  for (var i = 0; i < len; i++) {
+    var strP = JSONSerialize($String(i), value, replacer, stack,
+                             indent, gap);
+    if (IS_UNDEFINED(strP)) {
+      strP = "null";
+    }
+    partial.push(strP);
+  }
+  var final;
+  if (gap == "") {
+    final = "[" + partial.join(",") + "]";
+  } else if (partial.length > 0) {
+    var separator = ",\n" + indent;
+    final = "[\n" + indent + partial.join(separator) + "\n" +
+        stepback + "]";
+  } else {
+    final = "[]";
+  }
+  stack.pop();
+  return final;
+}
+
+function SerializeObject(value, replacer, stack, indent, gap) {
+  if (StackContains(stack, value)) {
+    throw MakeTypeError('circular_structure', []);
+  }
+  stack.push(value);
+  var stepback = indent;
+  indent += gap;
+  var partial = [];
+  if (IS_ARRAY(replacer)) {
+    var length = replacer.length;
+    for (var i = 0; i < length; i++) {
+      if (ObjectHasOwnProperty.call(replacer, i)) {
+        var p = replacer[i];
+        var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
+        if (!IS_UNDEFINED(strP)) {
+          var member = QuoteJSONString(p) + ":";
+          if (gap != "") member += " ";
+          member += strP;
+          partial.push(member);
+        }
+      }
+    }
+  } else {
+    for (var p in value) {
+      if (ObjectHasOwnProperty.call(value, p)) {
+        var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
+        if (!IS_UNDEFINED(strP)) {
+          var member = QuoteJSONString(p) + ":";
+          if (gap != "") member += " ";
+          member += strP;
+          partial.push(member);
+        }
+      }
+    }
+  }
+  var final;
+  if (gap == "") {
+    final = "{" + partial.join(",") + "}";
+  } else if (partial.length > 0) {
+    var separator = ",\n" + indent;
+    final = "{\n" + indent + partial.join(separator) + "\n" +
+        stepback + "}";
+  } else {
+    final = "{}";
+  }
+  stack.pop();
+  return final;
+}
+
+function JSONSerialize(key, holder, replacer, stack, indent, gap) {
+  var value = holder[key];
+  if (IS_OBJECT(value) && value) {
+    var toJSON = value.toJSON;
+    if (IS_FUNCTION(toJSON)) {
+      value = toJSON.call(value, key);
+    }
+  }
+  if (IS_FUNCTION(replacer)) {
+    value = replacer.call(holder, key, value);
+  }
+  // Unwrap value if necessary
+  if (IS_OBJECT(value)) {
+    if (IS_NUMBER_WRAPPER(value)) {
+      value = $Number(value);
+    } else if (IS_STRING_WRAPPER(value)) {
+      value = $String(value);
+    } else if (IS_BOOLEAN_WRAPPER(value)) {
+      value = $Boolean(value);
+    }
+  }
+  switch (typeof value) {
+    case "string":
+      return QuoteJSONString(value);
+    case "object":
+      if (!value) {
+        return "null";
+      } else if (IS_ARRAY(value)) {
+        return SerializeArray(value, replacer, stack, indent, gap);
+      } else {
+        return SerializeObject(value, replacer, stack, indent, gap);
+      }
+    case "number":
+      return $isFinite(value) ? $String(value) : "null";
+    case "boolean":
+      return value ? "true" : "false";
+  }
+}
+
+function JSONStringify(value, replacer, space) {
+  var stack = [];
+  var indent = "";
+  if (IS_OBJECT(space)) {
+    // Unwrap 'space' if it is wrapped
+    if (IS_NUMBER_WRAPPER(space)) {
+      space = $Number(space);
+    } else if (IS_STRING_WRAPPER(space)) {
+      space = $String(space);
+    }
+  }
+  var gap;
+  if (IS_NUMBER(space)) {
+    space = $Math.min(space, 10);
+    gap = "";
+    for (var i = 0; i < space; i++) {
+      gap += " ";
+    }
+  } else if (IS_STRING(space)) {
+    if (space.length > 10) {
+      gap = space.substring(0, 10);
+    } else {
+      gap = space;
+    }
+  } else {
+    gap = "";
+  }
+  return JSONSerialize('', {'': value}, replacer, stack, indent, gap);
+}
+
+function SetupJSON() {
+  InstallFunctions($JSON, DONT_ENUM, $Array(
+    "parse", JSONParse,
+    "stringify", JSONStringify
+  ));
+}
+
+SetupJSON();
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 505cf03..4094365 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -66,11 +66,6 @@
                                                Handle<String> pattern,
                                                Handle<String> flags,
                                                bool* has_pending_exception) {
-  // Ensure that the constructor function has been loaded.
-  if (!constructor->IsLoaded()) {
-    LoadLazy(constructor, has_pending_exception);
-    if (*has_pending_exception) return Handle<Object>();
-  }
   // Call the construct code with 2 arguments.
   Object** argv[2] = { Handle<Object>::cast(pattern).location(),
                        Handle<Object>::cast(flags).location() };
diff --git a/src/log.cc b/src/log.cc
index 5de7429..cf2fc97 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -573,6 +573,20 @@
 }
 
 
+void Logger::LogProfileMarker(Vector<const char> marker) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_prof) return;
+  LogMessageBuilder msg;
+  for (int i = 0; i < marker.length(); i++) {
+    char c = marker[i];
+    msg.Append(c);
+  }
+  msg.Append('\n');
+  msg.WriteToLogFile();
+#endif
+}
+
+
 void Logger::ApiIndexedSecurityCheck(uint32_t index) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (!Log::IsEnabled() || !FLAG_log_api) return;
@@ -1261,7 +1275,9 @@
       case Code::FUNCTION:
         return;  // We log this later using LogCompiledFunctions.
       case Code::STUB:
-        description = CodeStub::MajorName(code_object->major_key());
+        description = CodeStub::MajorName(code_object->major_key(), true);
+        if (description == NULL)
+          description = "A stub from the snapshot";
         tag = Logger::STUB_TAG;
         break;
       case Code::BUILTIN:
@@ -1294,6 +1310,15 @@
 }
 
 
+void Logger::LogCodeObjects() {
+  AssertNoAllocation no_alloc;
+  HeapIterator iterator;
+  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+    if (obj->IsCode()) LogCodeObject(obj);
+  }
+}
+
+
 void Logger::LogCompiledFunctions() {
   HandleScope scope;
   const int compiled_funcs_count = EnumerateCompiledFunctions(NULL);
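
LogProfileMarker simply copies an arbitrary marker string into the profile log, and LogCodeObjects walks the heap once, funnelling every Code object through the existing LogCodeObject path. A usage sketch (the marker text is made up for illustration):

    // Drop a raw marker line into the --prof log, then record every Code
    // object currently in the heap, e.g. after deserializing a snapshot.
    Logger::LogProfileMarker(CStrVector("code-objects-follow"));
    Logger::LogCodeObjects();
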
diff --git a/src/log.h b/src/log.h
index 1f6e60e..ae1e4be 100644
--- a/src/log.h
+++ b/src/log.h
@@ -265,6 +265,9 @@
   // Log an event reported from generated code
   static void LogRuntime(Vector<const char> format, JSArray* args);
 
+  // Log a profiling marker.
+  static void LogProfileMarker(Vector<const char> marker);
+
 #ifdef ENABLE_LOGGING_AND_PROFILING
   static StateTag state() {
     return current_state_ ? current_state_->state() : OTHER;
@@ -292,7 +295,7 @@
   // Logs all accessor callbacks found in the heap.
   static void LogAccessorCallbacks();
   // Used for logging stubs found in the snapshot.
-  static void LogCodeObject(Object* code_object);
+  static void LogCodeObjects();
 
  private:
 
@@ -325,6 +328,9 @@
   // Emits the source code of a regexp. Used by regexp events.
   static void LogRegExpSource(Handle<JSRegExp> regexp);
 
+  // Used for logging stubs found in the snapshot.
+  static void LogCodeObject(Object* code_object);
+
   // Emits a profiler tick event. Used by the profiler thread.
   static void TickEvent(TickSample* sample, bool overflow);
 
diff --git a/src/macro-assembler.h b/src/macro-assembler.h
index 0fe4328..e33148c 100644
--- a/src/macro-assembler.h
+++ b/src/macro-assembler.h
@@ -86,6 +86,13 @@
 #endif
 #include "code.h"  // must be after assembler_*.h
 #include "arm/macro-assembler-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/constants-mips.h"
+#include "assembler.h"
+#include "mips/assembler-mips.h"
+#include "mips/assembler-mips-inl.h"
+#include "code.h"  // must be after assembler_*.h
+#include "mips/macro-assembler-mips.h"
 #else
 #error Unsupported target architecture.
 #endif
diff --git a/src/macros.py b/src/macros.py
index c160b49..537113c 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -104,10 +104,6 @@
 # Macros implemented in Python.
 python macro CHAR_CODE(str) = ord(str[1]);
 
-# Accessors for original global properties that ensure they have been loaded.
-const ORIGINAL_REGEXP = (global.RegExp, $RegExp);
-const ORIGINAL_DATE   = (global.Date, $Date);
-
 # Constants used on an array to implement the properties of the RegExp object.
 const REGEXP_NUMBER_OF_CAPTURES = 0;
 const REGEXP_FIRST_CAPTURE = 3;
diff --git a/src/messages.js b/src/messages.js
index df008c9..7c939ca 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -27,6 +27,16 @@
 
 
 // -------------------------------------------------------------------
+//
+// Matches Script::Type from objects.h
+var TYPE_NATIVE = 0;
+var TYPE_EXTENSION = 1;
+var TYPE_NORMAL = 2;
+
+// Matches Script::CompilationType from objects.h
+var COMPILATION_TYPE_HOST = 0;
+var COMPILATION_TYPE_EVAL = 1;
+var COMPILATION_TYPE_JSON = 2;
 
 // Lazily initialized.
 var kVowelSounds = 0;
@@ -162,6 +172,8 @@
       value_and_accessor:           "Invalid property.  A property cannot both have accessors and be writable or have a value: %0",
       proto_object_or_null:         "Object prototype may only be an Object or null",
       property_desc_object:         "Property description must be an object: %0",
+      redefine_disallowed:          "Cannot redefine property: %0",
+      define_disallowed:            "Cannot define property, object is not extensible: %0",
       // RangeError
       invalid_array_length:         "Invalid array length",
       stack_overflow:               "Maximum call stack size exceeded",
@@ -630,7 +642,7 @@
 
 CallSite.prototype.isEval = function () {
   var script = %FunctionGetScript(this.fun);
-  return script && script.compilation_type == 1;
+  return script && script.compilation_type == COMPILATION_TYPE_EVAL;
 };
 
 CallSite.prototype.getEvalOrigin = function () {
@@ -652,7 +664,7 @@
   }
   // Maybe this is an evaluation?
   var script = %FunctionGetScript(this.fun);
-  if (script && script.compilation_type == 1)
+  if (script && script.compilation_type == COMPILATION_TYPE_EVAL)
     return "eval";
   return null;
 };
@@ -708,7 +720,7 @@
 
 CallSite.prototype.isNative = function () {
   var script = %FunctionGetScript(this.fun);
-  return script ? (script.type == 0) : false;
+  return script ? (script.type == TYPE_NATIVE) : false;
 };
 
 CallSite.prototype.getPosition = function () {
@@ -732,7 +744,7 @@
   
   var eval_from_script = script.eval_from_script;
   if (eval_from_script) {
-    if (eval_from_script.compilation_type == 1) {
+    if (eval_from_script.compilation_type == COMPILATION_TYPE_EVAL) {
       // eval script originated from another eval.
       eval_origin += " (eval at " + FormatEvalOrigin(eval_from_script) + ")";
     } else {
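
The numeric constants introduced above mirror Script::Type and Script::CompilationType from objects.h. A compile-time guard along these lines (not part of the patch; enum member names assumed from objects.h) would catch the two sides drifting apart:

    // In some C++ translation unit that includes objects.h and checks.h.
    STATIC_ASSERT(Script::TYPE_NATIVE == 0);
    STATIC_ASSERT(Script::TYPE_EXTENSION == 1);
    STATIC_ASSERT(Script::TYPE_NORMAL == 2);
    STATIC_ASSERT(Script::COMPILATION_TYPE_HOST == 0);
    STATIC_ASSERT(Script::COMPILATION_TYPE_EVAL == 1);
    STATIC_ASSERT(Script::COMPILATION_TYPE_JSON == 2);
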
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
new file mode 100644
index 0000000..2e63461
--- /dev/null
+++ b/src/mips/assembler-mips-inl.h
@@ -0,0 +1,215 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
+
+
+#ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
+#define V8_MIPS_ASSEMBLER_MIPS_INL_H_
+
+#include "mips/assembler-mips.h"
+#include "cpu.h"
+
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Condition
+
+Condition NegateCondition(Condition cc) {
+  ASSERT(cc != cc_always);
+  return static_cast<Condition>(cc ^ 1);
+}
+
+
+// -----------------------------------------------------------------------------
+// Operand and MemOperand
+
+Operand::Operand(int32_t immediate, RelocInfo::Mode rmode)  {
+  rm_ = no_reg;
+  imm32_ = immediate;
+  rmode_ = rmode;
+}
+
+Operand::Operand(const ExternalReference& f)  {
+  rm_ = no_reg;
+  imm32_ = reinterpret_cast<int32_t>(f.address());
+  rmode_ = RelocInfo::EXTERNAL_REFERENCE;
+}
+
+Operand::Operand(const char* s) {
+  rm_ = no_reg;
+  imm32_ = reinterpret_cast<int32_t>(s);
+  rmode_ = RelocInfo::EMBEDDED_STRING;
+}
+
+Operand::Operand(Smi* value) {
+  rm_ = no_reg;
+  imm32_ =  reinterpret_cast<intptr_t>(value);
+  rmode_ = RelocInfo::NONE;
+}
+
+Operand::Operand(Register rm) {
+  rm_ = rm;
+}
+
+bool Operand::is_reg() const {
+  return rm_.is_valid();
+}
+
+
+
+// -----------------------------------------------------------------------------
+// RelocInfo
+
+void RelocInfo::apply(intptr_t delta) {
+  // On MIPS we do not use pc relative addressing, so we don't need to patch the
+  // code here.
+}
+
+
+Address RelocInfo::target_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  return Assembler::target_address_at(pc_);
+}
+
+
+Address RelocInfo::target_address_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  return reinterpret_cast<Address>(pc_);
+}
+
+
+void RelocInfo::set_target_address(Address target) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  Assembler::set_target_address_at(pc_, target);
+}
+
+
+Object* RelocInfo::target_object() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return Handle<Object>(reinterpret_cast<Object**>(
+      Assembler::target_address_at(pc_)));
+}
+
+
+Object** RelocInfo::target_object_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return reinterpret_cast<Object**>(pc_);
+}
+
+
+void RelocInfo::set_target_object(Object* target) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+}
+
+
+Address* RelocInfo::target_reference_address() {
+  ASSERT(rmode_ == EXTERNAL_REFERENCE);
+  return reinterpret_cast<Address*>(pc_);
+}
+
+
+Address RelocInfo::call_address() {
+  ASSERT(IsPatchedReturnSequence());
+  // The 2 instructions offset assumes patched return sequence.
+  ASSERT(IsJSReturn(rmode()));
+  return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+  ASSERT(IsPatchedReturnSequence());
+  // The 2 instructions offset assumes patched return sequence.
+  ASSERT(IsJSReturn(rmode()));
+  Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
+}
+
+
+Object* RelocInfo::call_object() {
+  return *call_object_address();
+}
+
+
+Object** RelocInfo::call_object_address() {
+  ASSERT(IsPatchedReturnSequence());
+  // The 2 instructions offset assumes patched return sequence.
+  ASSERT(IsJSReturn(rmode()));
+  return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
+}
+
+
+void RelocInfo::set_call_object(Object* target) {
+  *call_object_address() = target;
+}
+
+
+bool RelocInfo::IsPatchedReturnSequence() {
+#ifdef DEBUG
+  PrintF("%s - %d - %s : Checking for jal(r)",
+      __FILE__, __LINE__, __func__);
+#endif
+  return ((Assembler::instr_at(pc_) & kOpcodeMask) == SPECIAL) &&
+         (((Assembler::instr_at(pc_) & kFunctionFieldMask) == JAL) ||
+          ((Assembler::instr_at(pc_) & kFunctionFieldMask) == JALR));
+}
+
+
+// -----------------------------------------------------------------------------
+// Assembler
+
+
+void Assembler::CheckBuffer() {
+  if (buffer_space() <= kGap) {
+    GrowBuffer();
+  }
+}
+
+
+void Assembler::emit(Instr x) {
+  CheckBuffer();
+  *reinterpret_cast<Instr*>(pc_) = x;
+  pc_ += kInstrSize;
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_ASSEMBLER_MIPS_INL_H_
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
new file mode 100644
index 0000000..4a91624
--- /dev/null
+++ b/src/mips/assembler-mips.cc
@@ -0,0 +1,1208 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
+
+
+#include "v8.h"
+#include "mips/assembler-mips-inl.h"
+#include "serialize.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+
+const Register no_reg = { -1 };
+
+const Register zero_reg = { 0 };
+const Register at = { 1 };
+const Register v0 = { 2 };
+const Register v1 = { 3 };
+const Register a0 = { 4 };
+const Register a1 = { 5 };
+const Register a2 = { 6 };
+const Register a3 = { 7 };
+const Register t0 = { 8 };
+const Register t1 = { 9 };
+const Register t2 = { 10 };
+const Register t3 = { 11 };
+const Register t4 = { 12 };
+const Register t5 = { 13 };
+const Register t6 = { 14 };
+const Register t7 = { 15 };
+const Register s0 = { 16 };
+const Register s1 = { 17 };
+const Register s2 = { 18 };
+const Register s3 = { 19 };
+const Register s4 = { 20 };
+const Register s5 = { 21 };
+const Register s6 = { 22 };
+const Register s7 = { 23 };
+const Register t8 = { 24 };
+const Register t9 = { 25 };
+const Register k0 = { 26 };
+const Register k1 = { 27 };
+const Register gp = { 28 };
+const Register sp = { 29 };
+const Register s8_fp = { 30 };
+const Register ra = { 31 };
+
+
+const FPURegister no_creg = { -1 };
+
+const FPURegister f0 = { 0 };
+const FPURegister f1 = { 1 };
+const FPURegister f2 = { 2 };
+const FPURegister f3 = { 3 };
+const FPURegister f4 = { 4 };
+const FPURegister f5 = { 5 };
+const FPURegister f6 = { 6 };
+const FPURegister f7 = { 7 };
+const FPURegister f8 = { 8 };
+const FPURegister f9 = { 9 };
+const FPURegister f10 = { 10 };
+const FPURegister f11 = { 11 };
+const FPURegister f12 = { 12 };
+const FPURegister f13 = { 13 };
+const FPURegister f14 = { 14 };
+const FPURegister f15 = { 15 };
+const FPURegister f16 = { 16 };
+const FPURegister f17 = { 17 };
+const FPURegister f18 = { 18 };
+const FPURegister f19 = { 19 };
+const FPURegister f20 = { 20 };
+const FPURegister f21 = { 21 };
+const FPURegister f22 = { 22 };
+const FPURegister f23 = { 23 };
+const FPURegister f24 = { 24 };
+const FPURegister f25 = { 25 };
+const FPURegister f26 = { 26 };
+const FPURegister f27 = { 27 };
+const FPURegister f28 = { 28 };
+const FPURegister f29 = { 29 };
+const FPURegister f30 = { 30 };
+const FPURegister f31 = { 31 };
+
+int ToNumber(Register reg) {
+  ASSERT(reg.is_valid());
+  const int kNumbers[] = {
+    0,    // zero_reg
+    1,    // at
+    2,    // v0
+    3,    // v1
+    4,    // a0
+    5,    // a1
+    6,    // a2
+    7,    // a3
+    8,    // t0
+    9,    // t1
+    10,   // t2
+    11,   // t3
+    12,   // t4
+    13,   // t5
+    14,   // t6
+    15,   // t7
+    16,   // s0
+    17,   // s1
+    18,   // s2
+    19,   // s3
+    20,   // s4
+    21,   // s5
+    22,   // s6
+    23,   // s7
+    24,   // t8
+    25,   // t9
+    26,   // k0
+    27,   // k1
+    28,   // gp
+    29,   // sp
+    30,   // s8_fp
+    31,   // ra
+  };
+  return kNumbers[reg.code()];
+}
+
+Register ToRegister(int num) {
+  ASSERT(num >= 0 && num < kNumRegisters);
+  const Register kRegisters[] = {
+    zero_reg,
+    at,
+    v0, v1,
+    a0, a1, a2, a3,
+    t0, t1, t2, t3, t4, t5, t6, t7,
+    s0, s1, s2, s3, s4, s5, s6, s7,
+    t8, t9,
+    k0, k1,
+    gp,
+    sp,
+    s8_fp,
+    ra
+  };
+  return kRegisters[num];
+}
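
ToNumber() and ToRegister() are intended to be exact inverses over the 32 architectural registers; a debug-only check like the sketch below (illustrative only, not in the patch) would confirm the two tables stay in sync.

    static void VerifyRegisterMapping() {
      for (int n = 0; n < kNumRegisters; n++) {
        ASSERT(ToNumber(ToRegister(n)) == n);  // Tables must be mutual inverses.
      }
    }
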
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo.
+
+const int RelocInfo::kApplyMask = 0;
+
+// Patch the code at the current address with the supplied instructions.
+void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
+  Instr* pc = reinterpret_cast<Instr*>(pc_);
+  Instr* instr = reinterpret_cast<Instr*>(instructions);
+  for (int i = 0; i < instruction_count; i++) {
+    *(pc + i) = *(instr + i);
+  }
+
+  // Indicate that code has changed.
+  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+  // Patch the code at the current address with a call to the target.
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand and MemOperand.
+// See assembler-mips-inl.h for inlined constructors.
+
+Operand::Operand(Handle<Object> handle) {
+  rm_ = no_reg;
+  // Verify all Objects referred by code are NOT in new space.
+  Object* obj = *handle;
+  ASSERT(!Heap::InNewSpace(obj));
+  if (obj->IsHeapObject()) {
+    imm32_ = reinterpret_cast<intptr_t>(handle.location());
+    rmode_ = RelocInfo::EMBEDDED_OBJECT;
+  } else {
+    // No relocation needed.
+    imm32_ = reinterpret_cast<intptr_t>(obj);
+    rmode_ = RelocInfo::NONE;
+  }
+}
+
+MemOperand::MemOperand(Register rm, int16_t offset) : Operand(rm) {
+  offset_ = offset;
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Assembler.
+
+static const int kMinimalBufferSize = 4*KB;
+static byte* spare_buffer_ = NULL;
+
+Assembler::Assembler(void* buffer, int buffer_size) {
+  if (buffer == NULL) {
+    // Do our own buffer management.
+    if (buffer_size <= kMinimalBufferSize) {
+      buffer_size = kMinimalBufferSize;
+
+      if (spare_buffer_ != NULL) {
+        buffer = spare_buffer_;
+        spare_buffer_ = NULL;
+      }
+    }
+    if (buffer == NULL) {
+      buffer_ = NewArray<byte>(buffer_size);
+    } else {
+      buffer_ = static_cast<byte*>(buffer);
+    }
+    buffer_size_ = buffer_size;
+    own_buffer_ = true;
+
+  } else {
+    // Use externally provided buffer instead.
+    ASSERT(buffer_size > 0);
+    buffer_ = static_cast<byte*>(buffer);
+    buffer_size_ = buffer_size;
+    own_buffer_ = false;
+  }
+
+  // Setup buffer pointers.
+  ASSERT(buffer_ != NULL);
+  pc_ = buffer_;
+  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+  current_statement_position_ = RelocInfo::kNoPosition;
+  current_position_ = RelocInfo::kNoPosition;
+  written_statement_position_ = current_statement_position_;
+  written_position_ = current_position_;
+}
+
+
+Assembler::~Assembler() {
+  if (own_buffer_) {
+    if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+      spare_buffer_ = buffer_;
+    } else {
+      DeleteArray(buffer_);
+    }
+  }
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+  ASSERT(pc_ <= reloc_info_writer.pos());  // no overlap
+  // Setup code descriptor.
+  desc->buffer = buffer_;
+  desc->buffer_size = buffer_size_;
+  desc->instr_size = pc_offset();
+  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+}
+
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the last
+// instruction using the label.
+
+
+// The link chain is terminated by a negative code position (must be aligned).
+const int kEndOfChain = -4;
+
+bool Assembler::is_branch(Instr instr) {
+  uint32_t opcode   = ((instr & kOpcodeMask));
+  uint32_t rt_field = ((instr & kRtFieldMask));
+  uint32_t rs_field = ((instr & kRsFieldMask));
+  // Checks if the instruction is a branch.
+  return opcode == BEQ ||
+      opcode == BNE ||
+      opcode == BLEZ ||
+      opcode == BGTZ ||
+      opcode == BEQL ||
+      opcode == BNEL ||
+      opcode == BLEZL ||
+      opcode == BGTZL ||
+      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
+                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
+      (opcode == COP1 && rs_field == BC1);  // Coprocessor branch.
+}
+
+
+int Assembler::target_at(int32_t pos) {
+  Instr instr = instr_at(pos);
+  if ((instr & ~kImm16Mask) == 0) {
+    // Emitted label constant, not part of a branch.
+    return instr - (Code::kHeaderSize - kHeapObjectTag);
+  }
+  // Check we have a branch instruction.
+  ASSERT(is_branch(instr));
+  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
+  // the compiler uses arithmetic shifts for signed integers.
+  int32_t imm18 = ((instr &
+                    static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+
+  return pos + kBranchPCOffset + imm18;
+}
+
+
+void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
+  Instr instr = instr_at(pos);
+  if ((instr & ~kImm16Mask) == 0) {
+    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
+    // Emitted label constant, not part of a branch.
+    // Make label relative to Code* of generated Code object.
+    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+    return;
+  }
+
+  ASSERT(is_branch(instr));
+  int32_t imm18 = target_pos - (pos + kBranchPCOffset);
+  ASSERT((imm18 & 3) == 0);
+
+  instr &= ~kImm16Mask;
+  int32_t imm16 = imm18 >> 2;
+  ASSERT(is_int16(imm16));
+
+  instr_at_put(pos, instr | (imm16 & kImm16Mask));
+}
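
The immediate handling in target_at()/target_at_put() above relies on branch immediates being signed 16-bit word offsets. Restated as a sketch (the kImm16Mask value is assumed to be the low 16 bits, as defined in constants-mips.h): shifting left by 16 parks the immediate's sign bit in bit 31, and the arithmetic shift right by 14 sign-extends while rescaling to a byte offset; encoding reverses this with a shift right by 2.

    int32_t DecodeBranchByteOffset(uint32_t instr) {
      const int32_t kImm16Mask = 0x0000FFFF;            // assumed value
      return (static_cast<int32_t>(instr & kImm16Mask) << 16) >> 14;
    }

    int32_t EncodeBranchImmediate(int32_t byte_offset) {
      ASSERT((byte_offset & 3) == 0);                   // must be word aligned
      return (byte_offset >> 2) & 0x0000FFFF;           // 16-bit word offset
    }
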
+
+
+void Assembler::print(Label* L) {
+  if (L->is_unused()) {
+    PrintF("unused label\n");
+  } else if (L->is_bound()) {
+    PrintF("bound label to %d\n", L->pos());
+  } else if (L->is_linked()) {
+    Label l = *L;
+    PrintF("unbound label");
+    while (l.is_linked()) {
+      PrintF("@ %d ", l.pos());
+      Instr instr = instr_at(l.pos());
+      if ((instr & ~kImm16Mask) == 0) {
+        PrintF("value\n");
+      } else {
+        PrintF("%d\n", instr);
+      }
+      next(&l);
+    }
+  } else {
+    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
+  }
+}
+
+
+void Assembler::bind_to(Label* L, int pos) {
+  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
+  while (L->is_linked()) {
+    int32_t fixup_pos = L->pos();
+    next(L);  // call next before overwriting link with target at fixup_pos
+    target_at_put(fixup_pos, pos);
+  }
+  L->bind_to(pos);
+
+  // Keep track of the last bound label so we don't eliminate any instructions
+  // before a bound label.
+  if (pos > last_bound_pos_)
+    last_bound_pos_ = pos;
+}
+
+
+void Assembler::link_to(Label* L, Label* appendix) {
+  if (appendix->is_linked()) {
+    if (L->is_linked()) {
+      // Append appendix to L's list.
+      int fixup_pos;
+      int link = L->pos();
+      do {
+        fixup_pos = link;
+        link = target_at(fixup_pos);
+      } while (link > 0);
+      ASSERT(link == kEndOfChain);
+      target_at_put(fixup_pos, appendix->pos());
+    } else {
+      // L is empty, simply use appendix
+      *L = *appendix;
+    }
+  }
+  appendix->Unuse();  // appendix should not be used anymore
+}
+
+
+void Assembler::bind(Label* L) {
+  ASSERT(!L->is_bound());  // label can only be bound once
+  bind_to(L, pc_offset());
+}
+
+
+void Assembler::next(Label* L) {
+  ASSERT(L->is_linked());
+  int link = target_at(L->pos());
+  if (link > 0) {
+    L->link_to(link);
+  } else {
+    ASSERT(link == kEndOfChain);
+    L->Unuse();
+  }
+}
+
+
+// We have to use a temporary register for things that can be relocated even
+// if they can be encoded in MIPS's 16 bits of immediate-offset instruction
+// space.  There is no guarantee that the relocated location can be similarly
+// encoded.
+bool Assembler::MustUseAt(RelocInfo::Mode rmode) {
+  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+    return Serializer::enabled();
+  } else if (rmode == RelocInfo::NONE) {
+    return false;
+  }
+  return true;
+}
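
A hypothetical helper (not in the patch) showing what MustUseAt() is protecting: a value that may still be relocated has to keep both 16-bit halves in the instruction stream so set_target_address_at() can rewrite them later, even when the current value would fit in a single instruction.

    static void LoadRelocatableImmediate(Assembler* masm,
                                         Register rd, int32_t imm) {
      masm->lui(rd, (imm >> 16) & 0xFFFF);  // upper half, patchable in place
      masm->ori(rd, rd, imm & 0xFFFF);      // lower half, patchable in place
    }
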
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
+                                 Register rs,
+                                 Register rt,
+                                 Register rd,
+                                 uint16_t sa,
+                                 SecondaryField func) {
+  ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
+  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
+  emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
+                                 SecondaryField fmt,
+                                 FPURegister ft,
+                                 FPURegister fs,
+                                 FPURegister fd,
+                                 SecondaryField func) {
+  ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
+  Instr instr = opcode | fmt | (ft.code() << 16) | (fs.code() << kFsShift)
+      | (fd.code() << 6) | func;
+  emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
+                                 SecondaryField fmt,
+                                 Register rt,
+                                 FPURegister fs,
+                                 FPURegister fd,
+                                 SecondaryField func) {
+  ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
+  Instr instr = opcode | fmt | (rt.code() << kRtShift)
+      | (fs.code() << kFsShift) | (fd.code() << 6) | func;
+  emit(instr);
+}
+
+
+// Instructions with immediate value.
+// Registers are in the order of the instruction encoding, from left to right.
+void Assembler::GenInstrImmediate(Opcode opcode,
+                                  Register rs,
+                                  Register rt,
+                                  int32_t j) {
+  ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
+  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+      | (j & kImm16Mask);
+  emit(instr);
+}
+
+
+void Assembler::GenInstrImmediate(Opcode opcode,
+                                  Register rs,
+                                  SecondaryField SF,
+                                  int32_t j) {
+  ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j)));
+  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
+  emit(instr);
+}
+
+
+void Assembler::GenInstrImmediate(Opcode opcode,
+                                  Register rs,
+                                  FPURegister ft,
+                                  int32_t j) {
+  ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
+  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
+      | (j & kImm16Mask);
+  emit(instr);
+}
+
+
+// Registers are in the order of the instruction encoding, from left to right.
+void Assembler::GenInstrJump(Opcode opcode,
+                              uint32_t address) {
+  ASSERT(is_uint26(address));
+  Instr instr = opcode | address;
+  emit(instr);
+}
+
+
+int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
+  int32_t target_pos;
+  if (L->is_bound()) {
+    target_pos = L->pos();
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();  // L's link
+    } else {
+      target_pos = kEndOfChain;
+    }
+    L->link_to(pc_offset());
+  }
+
+  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
+  return offset;
+}
+
+
+void Assembler::label_at_put(Label* L, int at_offset) {
+  int target_pos;
+  if (L->is_bound()) {
+    target_pos = L->pos();
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();  // L's link
+    } else {
+      target_pos = kEndOfChain;
+    }
+    L->link_to(at_offset);
+    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+  }
+}
+
+
+//------- Branch and jump instructions --------
+
+void Assembler::b(int16_t offset) {
+  beq(zero_reg, zero_reg, offset);
+}
+
+
+void Assembler::bal(int16_t offset) {
+  bgezal(zero_reg, offset);
+}
+
+
+void Assembler::beq(Register rs, Register rt, int16_t offset) {
+  GenInstrImmediate(BEQ, rs, rt, offset);
+}
+
+
+void Assembler::bgez(Register rs, int16_t offset) {
+  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
+}
+
+
+void Assembler::bgezal(Register rs, int16_t offset) {
+  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
+}
+
+
+void Assembler::bgtz(Register rs, int16_t offset) {
+  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
+}
+
+
+void Assembler::blez(Register rs, int16_t offset) {
+  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
+}
+
+
+void Assembler::bltz(Register rs, int16_t offset) {
+  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
+}
+
+
+void Assembler::bltzal(Register rs, int16_t offset) {
+  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
+}
+
+
+void Assembler::bne(Register rs, Register rt, int16_t offset) {
+  GenInstrImmediate(BNE, rs, rt, offset);
+}
+
+
+void Assembler::j(int32_t target) {
+  ASSERT(is_uint28(target) && ((target & 3) == 0));
+  GenInstrJump(J, target >> 2);
+}
+
+
+void Assembler::jr(Register rs) {
+  GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
+}
+
+
+void Assembler::jal(int32_t target) {
+  ASSERT(is_uint28(target) && ((target & 3) == 0));
+  GenInstrJump(JAL, target >> 2);
+}
+
+
+void Assembler::jalr(Register rs, Register rd) {
+  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
+}
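
An illustrative (non-patch) use of the branch and label machinery implemented above: a simple countdown loop. Note the explicit nop() for the branch delay slot; the instructions above emit no delay-slot filler.

    static void EmitCountdownLoop(Assembler* masm, Register counter) {
      Label loop;
      masm->bind(&loop);
      masm->addiu(counter, counter, -1);    // counter -= 1
      masm->bne(counter, zero_reg, &loop);  // loop while counter != 0
      masm->nop();                          // branch delay slot
    }
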
+
+
+//-------Data-processing-instructions---------
+
+// Arithmetic.
+
+void Assembler::add(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADD);
+}
+
+
+void Assembler::addu(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
+}
+
+
+void Assembler::addi(Register rd, Register rs, int32_t j) {
+  GenInstrImmediate(ADDI, rs, rd, j);
+}
+
+
+void Assembler::addiu(Register rd, Register rs, int32_t j) {
+  GenInstrImmediate(ADDIU, rs, rd, j);
+}
+
+
+void Assembler::sub(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUB);
+}
+
+
+void Assembler::subu(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
+}
+
+
+void Assembler::mul(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
+}
+
+
+void Assembler::mult(Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
+}
+
+
+void Assembler::multu(Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
+}
+
+
+void Assembler::div(Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
+}
+
+
+void Assembler::divu(Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
+}
+
+
+// Logical.
+
+void Assembler::and_(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
+}
+
+
+void Assembler::andi(Register rt, Register rs, int32_t j) {
+  GenInstrImmediate(ANDI, rs, rt, j);
+}
+
+
+void Assembler::or_(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
+}
+
+
+void Assembler::ori(Register rt, Register rs, int32_t j) {
+  GenInstrImmediate(ORI, rs, rt, j);
+}
+
+
+void Assembler::xor_(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
+}
+
+
+void Assembler::xori(Register rt, Register rs, int32_t j) {
+  GenInstrImmediate(XORI, rs, rt, j);
+}
+
+
+void Assembler::nor(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
+}
+
+
+// Shifts.
+void Assembler::sll(Register rd, Register rt, uint16_t sa) {
+  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
+}
+
+
+void Assembler::sllv(Register rd, Register rt, Register rs) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
+}
+
+
+void Assembler::srl(Register rd, Register rt, uint16_t sa) {
+  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
+}
+
+
+void Assembler::srlv(Register rd, Register rt, Register rs) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
+}
+
+
+void Assembler::sra(Register rd, Register rt, uint16_t sa) {
+  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
+}
+
+
+void Assembler::srav(Register rd, Register rt, Register rs) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
+}
+
+
+//------------Memory-instructions-------------
+
+void Assembler::lb(Register rd, const MemOperand& rs) {
+  GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::lbu(Register rd, const MemOperand& rs) {
+  GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::lw(Register rd, const MemOperand& rs) {
+  GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::sb(Register rd, const MemOperand& rs) {
+  GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::sw(Register rd, const MemOperand& rs) {
+  GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::lui(Register rd, int32_t j) {
+  GenInstrImmediate(LUI, zero_reg, rd, j);
+}
+
+
+//-------------Misc-instructions--------------
+
+// Break / Trap instructions.
+void Assembler::break_(uint32_t code) {
+  ASSERT((code & ~0xfffff) == 0);
+  Instr break_instr = SPECIAL | BREAK | (code << 6);
+  emit(break_instr);
+}
+
+
+void Assembler::tge(Register rs, Register rt, uint16_t code) {
+  ASSERT(is_uint10(code));
+  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
+      | rt.code() << kRtShift | code << 6;
+  emit(instr);
+}
+
+
+void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
+  ASSERT(is_uint10(code));
+  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
+      | rt.code() << kRtShift | code << 6;
+  emit(instr);
+}
+
+
+void Assembler::tlt(Register rs, Register rt, uint16_t code) {
+  ASSERT(is_uint10(code));
+  Instr instr =
+      SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
+  emit(instr);
+}
+
+
+void Assembler::tltu(Register rs, Register rt, uint16_t code) {
+  ASSERT(is_uint10(code));
+  Instr instr = SPECIAL | TLTU | rs.code() << kRsShift
+      | rt.code() << kRtShift | code << 6;
+  emit(instr);
+}
+
+
+void Assembler::teq(Register rs, Register rt, uint16_t code) {
+  ASSERT(is_uint10(code));
+  Instr instr =
+      SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
+  emit(instr);
+}
+
+
+void Assembler::tne(Register rs, Register rt, uint16_t code) {
+  ASSERT(is_uint10(code));
+  Instr instr =
+      SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
+  emit(instr);
+}
+
+
+// Move from HI/LO register.
+
+void Assembler::mfhi(Register rd) {
+  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
+}
+
+
+void Assembler::mflo(Register rd) {
+  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
+}
+
+
+// Set on less than instructions.
+void Assembler::slt(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
+}
+
+
+void Assembler::sltu(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
+}
+
+
+void Assembler::slti(Register rt, Register rs, int32_t j) {
+  GenInstrImmediate(SLTI, rs, rt, j);
+}
+
+
+void Assembler::sltiu(Register rt, Register rs, int32_t j) {
+  GenInstrImmediate(SLTIU, rs, rt, j);
+}
+
+
+//--------Coprocessor-instructions----------------
+
+// Load, store, move.
+void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
+  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
+}
+
+
+void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
+  GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
+}
+
+
+void Assembler::swc1(FPURegister fd, const MemOperand& src) {
+  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
+}
+
+
+void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
+  GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
+}
+
+
+void Assembler::mtc1(FPURegister fs, Register rt) {
+  GenInstrRegister(COP1, MTC1, rt, fs, f0);
+}
+
+
+void Assembler::mthc1(FPURegister fs, Register rt) {
+  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
+}
+
+
+void Assembler::mfc1(FPURegister fs, Register rt) {
+  GenInstrRegister(COP1, MFC1, rt, fs, f0);
+}
+
+
+void Assembler::mfhc1(FPURegister fs, Register rt) {
+  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
+}
+
+
+// Conversions.
+
+void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
+}
+
+
+void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
+}
+
+
+void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
+}
+
+
+void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
+}
+
+
+void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
+}
+
+
+void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
+}
+
+
+void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
+}
+
+
+void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
+}
+
+
+void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
+}
+
+
+void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
+}
+
+
+// Conditions.
+void Assembler::c(FPUCondition cond, SecondaryField fmt,
+    FPURegister ft, FPURegister fs, uint16_t cc) {
+  ASSERT(is_uint3(cc));
+  ASSERT((fmt & ~(31 << kRsShift)) == 0);
+  Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
+      | cc << 8 | 3 << 4 | cond;
+  emit(instr);
+}
+
+
+void Assembler::bc1f(int16_t offset, uint16_t cc) {
+  ASSERT(is_uint3(cc));
+  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
+  emit(instr);
+}
+
+
+void Assembler::bc1t(int16_t offset, uint16_t cc) {
+  ASSERT(is_uint3(cc));
+  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
+  emit(instr);
+}
+
+
+// Debugging.
+void Assembler::RecordJSReturn() {
+  WriteRecordedPositions();
+  CheckBuffer();
+  RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+  if (FLAG_debug_code) {
+    CheckBuffer();
+    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+  }
+}
+
+
+void Assembler::RecordPosition(int pos) {
+  if (pos == RelocInfo::kNoPosition) return;
+  ASSERT(pos >= 0);
+  current_position_ = pos;
+}
+
+
+void Assembler::RecordStatementPosition(int pos) {
+  if (pos == RelocInfo::kNoPosition) return;
+  ASSERT(pos >= 0);
+  current_statement_position_ = pos;
+}
+
+
+void Assembler::WriteRecordedPositions() {
+  // Write the statement position if it is different from what was written last
+  // time.
+  if (current_statement_position_ != written_statement_position_) {
+    CheckBuffer();
+    RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
+    written_statement_position_ = current_statement_position_;
+  }
+
+  // Write the position if it is different from what was written last time and
+  // also different from the written statement position.
+  if (current_position_ != written_position_ &&
+      current_position_ != written_statement_position_) {
+    CheckBuffer();
+    RecordRelocInfo(RelocInfo::POSITION, current_position_);
+    written_position_ = current_position_;
+  }
+}
+
+
+void Assembler::GrowBuffer() {
+  if (!own_buffer_) FATAL("external code buffer is too small");
+
+  // Compute new buffer size.
+  CodeDesc desc;  // the new buffer
+  if (buffer_size_ < 4*KB) {
+    desc.buffer_size = 4*KB;
+  } else if (buffer_size_ < 1*MB) {
+    desc.buffer_size = 2*buffer_size_;
+  } else {
+    desc.buffer_size = buffer_size_ + 1*MB;
+  }
+  CHECK_GT(desc.buffer_size, 0);  // no overflow
+
+  // Setup new buffer.
+  desc.buffer = NewArray<byte>(desc.buffer_size);
+
+  desc.instr_size = pc_offset();
+  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+
+  // Copy the data.
+  int pc_delta = desc.buffer - buffer_;
+  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+  memmove(desc.buffer, buffer_, desc.instr_size);
+  memmove(reloc_info_writer.pos() + rc_delta,
+          reloc_info_writer.pos(), desc.reloc_size);
+
+  // Switch buffers.
+  DeleteArray(buffer_);
+  buffer_ = desc.buffer;
+  buffer_size_ = desc.buffer_size;
+  pc_ += pc_delta;
+  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+                               reloc_info_writer.last_pc() + pc_delta);
+
+
+  // On ia32 and ARM pc relative addressing is used, and we thus need to apply a
+  // shift by pc_delta. But on MIPS the target address is directly loaded, so
+  // we do not need to relocate here.
+
+  ASSERT(!overflow());
+}
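
The growth policy used by GrowBuffer() above, restated as a sketch (KB and MB are the size constants from globals.h): start at 4 KB, double while under 1 MB, then grow linearly in 1 MB steps.

    static int NextBufferSize(int current_size) {
      if (current_size < 4 * KB) return 4 * KB;            // minimum size
      if (current_size < 1 * MB) return 2 * current_size;  // exponential phase
      return current_size + 1 * MB;                        // linear phase
    }
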
+
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+  RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
+  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
+    // Adjust code for new modes.
+    ASSERT(RelocInfo::IsJSReturn(rmode)
+           || RelocInfo::IsComment(rmode)
+           || RelocInfo::IsPosition(rmode));
+    // These modes do not need an entry in the constant pool.
+  }
+  if (rinfo.rmode() != RelocInfo::NONE) {
+    // Don't record external references unless the heap will be serialized.
+    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
+        !Serializer::enabled() &&
+        !FLAG_debug_code) {
+      return;
+    }
+    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
+    reloc_info_writer.Write(&rinfo);
+  }
+}
+
+
+Address Assembler::target_address_at(Address pc) {
+  Instr instr1 = instr_at(pc);
+  Instr instr2 = instr_at(pc + kInstrSize);
+  // Check we have 2 instructions generated by li.
+  ASSERT(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
+         ((instr1 == nopInstr) && ((instr2 & kOpcodeMask) == ADDIU ||
+                            (instr2 & kOpcodeMask) == ORI ||
+                            (instr2 & kOpcodeMask) == LUI)));
+  // Interpret these 2 instructions.
+  if (instr1 == nopInstr) {
+    if ((instr2 & kOpcodeMask) == ADDIU) {
+      return reinterpret_cast<Address>(((instr2 & kImm16Mask) << 16) >> 16);
+    } else if ((instr2 & kOpcodeMask) == ORI) {
+      return reinterpret_cast<Address>(instr2 & kImm16Mask);
+    } else if ((instr2 & kOpcodeMask) == LUI) {
+      return reinterpret_cast<Address>((instr2 & kImm16Mask) << 16);
+    }
+  } else if ((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) {
+    // 32 bits value.
+    return reinterpret_cast<Address>(
+        (instr1 & kImm16Mask) << 16 | (instr2 & kImm16Mask));
+  }
+
+  // We should never get here.
+  UNREACHABLE();
+  return (Address)0x0;
+}
+
+
+void Assembler::set_target_address_at(Address pc, Address target) {
+  // On MIPS we patch the generated code in place to load the new target.
+
+  // First check we have a li.
+  Instr instr2 = instr_at(pc + kInstrSize);
+#ifdef DEBUG
+  Instr instr1 = instr_at(pc);
+
+  // Check we have indeed the result from a li with MustUseAt true.
+  CHECK(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
+        ((instr1 == 0) && ((instr2 & kOpcodeMask)== ADDIU ||
+                           (instr2 & kOpcodeMask)== ORI ||
+                           (instr2 & kOpcodeMask)== LUI)));
+#endif
+
+
+  uint32_t rt_code = (instr2 & kRtFieldMask);
+  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
+  uint32_t itarget = reinterpret_cast<uint32_t>(target);
+
+  if (is_int16(itarget)) {
+    // nop
+    // addiu rt zero_reg j
+    *p = nopInstr;
+    *(p+1) = ADDIU | rt_code | (itarget & LOMask);
+  } else if (!(itarget & HIMask)) {
+    // nop
+    // ori rt zero_reg j
+    *p = nopInstr;
+    *(p+1) = ORI | rt_code | (itarget & LOMask);
+  } else if (!(itarget & LOMask)) {
+    // nop
+    // lui rt (HIMask & itarget)>>16
+    *p = nopInstr;
+    *(p+1) = LUI | rt_code | ((itarget & HIMask)>>16);
+  } else {
+    // lui rt (HIMask & itarget)>>16
+    // ori rt rt, (LOMask & itarget)
+    *p = LUI | rt_code | ((itarget & HIMask)>>16);
+    *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & LOMask);
+  }
+
+  CPU::FlushICache(pc, 2 * sizeof(int32_t));
+}
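
For reference, the four patch shapes chosen above can be summarized by the sketch below (the LOMask/HIMask values are assumed to be the low/high 16-bit masks from constants-mips.h):

    enum PatchShape { kNopAddiu, kNopOri, kNopLui, kLuiOri };

    static PatchShape ClassifyTarget(uint32_t itarget) {
      const uint32_t kLo = 0x0000FFFF;  // assumed value of LOMask
      const uint32_t kHi = 0xFFFF0000;  // assumed value of HIMask
      int32_t value = static_cast<int32_t>(itarget);
      if (value >= -0x8000 && value < 0x8000) return kNopAddiu;  // is_int16
      if ((itarget & kHi) == 0) return kNopOri;   // only the low half is needed
      if ((itarget & kLo) == 0) return kNopLui;   // only the high half is needed
      return kLuiOri;                             // full lui/ori pair
    }
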
+
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
new file mode 100644
index 0000000..4f5ae3e
--- /dev/null
+++ b/src/mips/assembler-mips.h
@@ -0,0 +1,663 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
+
+
+#ifndef V8_MIPS_ASSEMBLER_MIPS_H_
+#define V8_MIPS_ASSEMBLER_MIPS_H_
+
+#include <stdio.h>
+#include "assembler.h"
+#include "constants-mips.h"
+#include "serialize.h"
+
+using namespace assembler::mips;
+
+namespace v8 {
+namespace internal {
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Register and FPURegister
+
+// Core register.
+struct Register {
+  bool is_valid() const  { return 0 <= code_ && code_ < kNumRegisters; }
+  bool is(Register reg) const  { return code_ == reg.code_; }
+  int code() const  {
+    ASSERT(is_valid());
+    return code_;
+  }
+  int bit() const  {
+    ASSERT(is_valid());
+    return 1 << code_;
+  }
+
+  // Unfortunately we can't make this private in a struct.
+  int code_;
+};
+
+extern const Register no_reg;
+
+extern const Register zero_reg;
+extern const Register at;
+extern const Register v0;
+extern const Register v1;
+extern const Register a0;
+extern const Register a1;
+extern const Register a2;
+extern const Register a3;
+extern const Register t0;
+extern const Register t1;
+extern const Register t2;
+extern const Register t3;
+extern const Register t4;
+extern const Register t5;
+extern const Register t6;
+extern const Register t7;
+extern const Register s0;
+extern const Register s1;
+extern const Register s2;
+extern const Register s3;
+extern const Register s4;
+extern const Register s5;
+extern const Register s6;
+extern const Register s7;
+extern const Register t8;
+extern const Register t9;
+extern const Register k0;
+extern const Register k1;
+extern const Register gp;
+extern const Register sp;
+extern const Register s8_fp;
+extern const Register ra;
+
+int ToNumber(Register reg);
+
+Register ToRegister(int num);
+
+// Coprocessor register.
+struct FPURegister {
+  bool is_valid() const  { return 0 <= code_ && code_ < kNumFPURegister ; }
+  bool is(FPURegister creg) const  { return code_ == creg.code_; }
+  int code() const  {
+    ASSERT(is_valid());
+    return code_;
+  }
+  int bit() const  {
+    ASSERT(is_valid());
+    return 1 << code_;
+  }
+
+  // Unfortunately we can't make this private in a struct.
+  int code_;
+};
+
+extern const FPURegister no_creg;
+
+extern const FPURegister f0;
+extern const FPURegister f1;
+extern const FPURegister f2;
+extern const FPURegister f3;
+extern const FPURegister f4;
+extern const FPURegister f5;
+extern const FPURegister f6;
+extern const FPURegister f7;
+extern const FPURegister f8;
+extern const FPURegister f9;
+extern const FPURegister f10;
+extern const FPURegister f11;
+extern const FPURegister f12;  // arg
+extern const FPURegister f13;
+extern const FPURegister f14;  // arg
+extern const FPURegister f15;
+extern const FPURegister f16;
+extern const FPURegister f17;
+extern const FPURegister f18;
+extern const FPURegister f19;
+extern const FPURegister f20;
+extern const FPURegister f21;
+extern const FPURegister f22;
+extern const FPURegister f23;
+extern const FPURegister f24;
+extern const FPURegister f25;
+extern const FPURegister f26;
+extern const FPURegister f27;
+extern const FPURegister f28;
+extern const FPURegister f29;
+extern const FPURegister f30;
+extern const FPURegister f31;
+
+
+// Returns the equivalent of !cc.
+// Negation of the default no_condition (-1) results in a non-default
+// no_condition value (-2). As long as tests for no_condition check
+// for condition < 0, this will work as expected.
+inline Condition NegateCondition(Condition cc);
+
+inline Condition ReverseCondition(Condition cc) {
+  switch (cc) {
+    case Uless:
+      return Ugreater;
+    case Ugreater:
+      return Uless;
+    case Ugreater_equal:
+      return Uless_equal;
+    case Uless_equal:
+      return Ugreater_equal;
+    case less:
+      return greater;
+    case greater:
+      return less;
+    case greater_equal:
+      return less_equal;
+    case less_equal:
+      return greater_equal;
+    default:
+      return cc;
+  }
+}
+
+
+enum Hint {
+  no_hint = 0
+};
+
+inline Hint NegateHint(Hint hint) {
+  return no_hint;
+}
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands.
+
+// Class Operand represents a shifter operand in data processing instructions.
+class Operand BASE_EMBEDDED {
+ public:
+  // Immediate.
+  INLINE(explicit Operand(int32_t immediate,
+         RelocInfo::Mode rmode = RelocInfo::NONE));
+  INLINE(explicit Operand(const ExternalReference& f));
+  INLINE(explicit Operand(const char* s));
+  INLINE(explicit Operand(Object** opp));
+  INLINE(explicit Operand(Context** cpp));
+  explicit Operand(Handle<Object> handle);
+  INLINE(explicit Operand(Smi* value));
+
+  // Register.
+  INLINE(explicit Operand(Register rm));
+
+  // Return true if this is a register operand.
+  INLINE(bool is_reg() const);
+
+  Register rm() const { return rm_; }
+
+ private:
+  Register rm_;
+  int32_t imm32_;  // Valid if rm_ == no_reg
+  RelocInfo::Mode rmode_;
+
+  friend class Assembler;
+  friend class MacroAssembler;
+};
+
+
+// On MIPS we have only one addressing mode with base_reg + offset.
+// Class MemOperand represents a memory operand in load and store instructions.
+class MemOperand : public Operand {
+ public:
+
+  explicit MemOperand(Register rn, int16_t offset = 0);
+
+ private:
+  int16_t offset_;
+
+  friend class Assembler;
+};
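
A short example (sketch, not in the patch) of that single base+offset form: load a word from object+offset into dst. Offsets are signed 16-bit, so larger displacements have to be materialized in a register first.

    static void LoadWordField(Assembler* masm, Register dst,
                              Register object, int16_t offset) {
      masm->lw(dst, MemOperand(object, offset));  // dst = *(object + offset)
    }
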
+
+
+class Assembler : public Malloced {
+ public:
+  // Create an assembler. Instructions and relocation information are emitted
+  // into a buffer, with the instructions starting from the beginning and the
+  // relocation information starting from the end of the buffer. See CodeDesc
+  // for a detailed comment on the layout (globals.h).
+  //
+  // If the provided buffer is NULL, the assembler allocates and grows its own
+  // buffer, and buffer_size determines the initial buffer size. The buffer is
+  // owned by the assembler and deallocated upon destruction of the assembler.
+  //
+  // If the provided buffer is not NULL, the assembler uses the provided buffer
+  // for code generation and assumes its size to be buffer_size. If the buffer
+  // is too small, a fatal error occurs. No deallocation of the buffer is done
+  // upon destruction of the assembler.
+  Assembler(void* buffer, int buffer_size);
+  ~Assembler();
+
+  // GetCode emits any pending (non-emitted) code and fills the descriptor
+  // desc. GetCode() is idempotent; it returns the same result if no other
+  // Assembler functions are invoked in between GetCode() calls.
+  void GetCode(CodeDesc* desc);
+
+  // Label operations & relative jumps (PPUM Appendix D).
+  //
+  // Takes a branch opcode (cc) and a label (L) and generates
+  // either a backward branch or a forward branch and links it
+  // to the label fixup chain. Usage:
+  //
+  // Label L;    // unbound label
+  // j(cc, &L);  // forward branch to unbound label
+  // bind(&L);   // bind label to the current pc
+  // j(cc, &L);  // backward branch to bound label
+  // bind(&L);   // illegal: a label may be bound only once
+  //
+  // Note: The same Label can be used for forward and backward branches
+  // but it may be bound only once.
+  void bind(Label* L);  // binds an unbound label L to the current code position
+
+  // Returns the branch offset to the given label from the current code
+  // position. Links the label to the current position if it is still unbound.
+  // Manages the jump elimination optimization if the second parameter is true.
+  int32_t branch_offset(Label* L, bool jump_elimination_allowed);
+  int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
+    int32_t o = branch_offset(L, jump_elimination_allowed);
+    ASSERT((o & 3) == 0);   // Assert the offset is aligned.
+    return o >> 2;
+  }
+
+  // Puts a label's target address at the given position.
+  // The high 8 bits are set to zero.
+  void label_at_put(Label* L, int at_offset);
+
+  // Size of an instruction.
+  static const int kInstrSize = sizeof(Instr);
+
+  // Difference between address of current opcode and target address offset.
+  static const int kBranchPCOffset = 4;
+
+  // Read/Modify the code target address in the branch/call instruction at pc.
+  static Address target_address_at(Address pc);
+  static void set_target_address_at(Address pc, Address target);
+
+  // This sets the branch destination (which gets loaded at the call address).
+  // This is for calls and branches within generated code.
+  inline static void set_target_at(Address instruction_payload,
+                                   Address target) {
+    set_target_address_at(instruction_payload, target);
+  }
+
+  // This sets the branch destination.
+  // This is for calls and branches to runtime code.
+  inline static void set_external_target_at(Address instruction_payload,
+                                            Address target) {
+    set_target_address_at(instruction_payload, target);
+  }
+
+  static const int kCallTargetSize = 3 * kPointerSize;
+  static const int kExternalTargetSize = 3 * kPointerSize;
+
+  // Distance between the instruction referring to the address of the call
+  // target and the return address.
+  static const int kCallTargetAddressOffset = 4 * kInstrSize;
+
+  // Distance between start of patched return sequence and the emitted address
+  // to jump to.
+  static const int kPatchReturnSequenceAddressOffset = kInstrSize;
+
+
+  // ---------------------------------------------------------------------------
+  // Code generation.
+
+  void nop() { sll(zero_reg, zero_reg, 0); }
+
+
+  //------- Branch and jump  instructions --------
+  // We do not use the branch-likely variants of these instructions.
+  void b(int16_t offset);
+  void b(Label* L) { b(branch_offset(L, false)>>2); }
+  void bal(int16_t offset);
+  void bal(Label* L) { bal(branch_offset(L, false)>>2); }
+
+  void beq(Register rs, Register rt, int16_t offset);
+  void beq(Register rs, Register rt, Label* L) {
+    beq(rs, rt, branch_offset(L, false) >> 2);
+  }
+  void bgez(Register rs, int16_t offset);
+  void bgezal(Register rs, int16_t offset);
+  void bgtz(Register rs, int16_t offset);
+  void blez(Register rs, int16_t offset);
+  void bltz(Register rs, int16_t offset);
+  void bltzal(Register rs, int16_t offset);
+  void bne(Register rs, Register rt, int16_t offset);
+  void bne(Register rs, Register rt, Label* L) {
+    bne(rs, rt, branch_offset(L, false)>>2);
+  }
+
+  // Never use the int16_t b(l)cond versions with a raw branch offset;
+  // use the Label* versions instead. See the Twiki for details.
+
+  // Jump targets must be in the current 256 MB-aligned region, i.e. only the
+  // low 28 bits of the target address can be encoded.
+  void j(int32_t target);
+  void jal(int32_t target);
+  void jalr(Register rs, Register rd = ra);
+  void jr(Register target);
+
+
+  //-------Data-processing-instructions---------
+
+  // Arithmetic.
+  void add(Register rd, Register rs, Register rt);
+  void addu(Register rd, Register rs, Register rt);
+  void sub(Register rd, Register rs, Register rt);
+  void subu(Register rd, Register rs, Register rt);
+  void mult(Register rs, Register rt);
+  void multu(Register rs, Register rt);
+  void div(Register rs, Register rt);
+  void divu(Register rs, Register rt);
+  void mul(Register rd, Register rs, Register rt);
+
+  void addi(Register rd, Register rs, int32_t j);
+  void addiu(Register rd, Register rs, int32_t j);
+
+  // Logical.
+  void and_(Register rd, Register rs, Register rt);
+  void or_(Register rd, Register rs, Register rt);
+  void xor_(Register rd, Register rs, Register rt);
+  void nor(Register rd, Register rs, Register rt);
+
+  void andi(Register rd, Register rs, int32_t j);
+  void ori(Register rd, Register rs, int32_t j);
+  void xori(Register rd, Register rs, int32_t j);
+  void lui(Register rd, int32_t j);
+
+  // Shifts.
+  void sll(Register rd, Register rt, uint16_t sa);
+  void sllv(Register rd, Register rt, Register rs);
+  void srl(Register rd, Register rt, uint16_t sa);
+  void srlv(Register rd, Register rt, Register rs);
+  void sra(Register rt, Register rd, uint16_t sa);
+  void srav(Register rt, Register rd, Register rs);
+
+
+  //------------Memory-instructions-------------
+
+  void lb(Register rd, const MemOperand& rs);
+  void lbu(Register rd, const MemOperand& rs);
+  void lw(Register rd, const MemOperand& rs);
+  void sb(Register rd, const MemOperand& rs);
+  void sw(Register rd, const MemOperand& rs);
+
+
+  //-------------Misc-instructions--------------
+
+  // Break / Trap instructions.
+  void break_(uint32_t code);
+  void tge(Register rs, Register rt, uint16_t code);
+  void tgeu(Register rs, Register rt, uint16_t code);
+  void tlt(Register rs, Register rt, uint16_t code);
+  void tltu(Register rs, Register rt, uint16_t code);
+  void teq(Register rs, Register rt, uint16_t code);
+  void tne(Register rs, Register rt, uint16_t code);
+
+  // Move from HI/LO register.
+  void mfhi(Register rd);
+  void mflo(Register rd);
+
+  // Set on less than.
+  void slt(Register rd, Register rs, Register rt);
+  void sltu(Register rd, Register rs, Register rt);
+  void slti(Register rd, Register rs, int32_t j);
+  void sltiu(Register rd, Register rs, int32_t j);
+
+
+  //--------Coprocessor-instructions----------------
+
+  // Load, store, and move.
+  void lwc1(FPURegister fd, const MemOperand& src);
+  void ldc1(FPURegister fd, const MemOperand& src);
+
+  void swc1(FPURegister fs, const MemOperand& dst);
+  void sdc1(FPURegister fs, const MemOperand& dst);
+
+  // When paired with MTC1 to write a value to a 64-bit FPR, the MTC1 must be
+  // executed first, followed by the MTHC1.
+  void mtc1(FPURegister fs, Register rt);
+  void mthc1(FPURegister fs, Register rt);
+  void mfc1(FPURegister fs, Register rt);
+  void mfhc1(FPURegister fs, Register rt);
+
+  // Conversion.
+  void cvt_w_s(FPURegister fd, FPURegister fs);
+  void cvt_w_d(FPURegister fd, FPURegister fs);
+
+  void cvt_l_s(FPURegister fd, FPURegister fs);
+  void cvt_l_d(FPURegister fd, FPURegister fs);
+
+  void cvt_s_w(FPURegister fd, FPURegister fs);
+  void cvt_s_l(FPURegister fd, FPURegister fs);
+  void cvt_s_d(FPURegister fd, FPURegister fs);
+
+  void cvt_d_w(FPURegister fd, FPURegister fs);
+  void cvt_d_l(FPURegister fd, FPURegister fs);
+  void cvt_d_s(FPURegister fd, FPURegister fs);
+
+  // Conditions and branches.
+  void c(FPUCondition cond, SecondaryField fmt,
+         FPURegister ft, FPURegister fs, uint16_t cc = 0);
+
+  void bc1f(int16_t offset, uint16_t cc = 0);
+  void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); }
+  void bc1t(int16_t offset, uint16_t cc = 0);
+  void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); }
+
+
+  // Check the code size generated from label to here.
+  int InstructionsGeneratedSince(Label* l) {
+    return (pc_offset() - l->pos()) / kInstrSize;
+  }
+
+  // Debugging.
+
+  // Mark address of the ExitJSFrame code.
+  void RecordJSReturn();
+
+  // Record a comment relocation entry that can be used by a disassembler.
+  // Use --debug_code to enable.
+  void RecordComment(const char* msg);
+
+  void RecordPosition(int pos);
+  void RecordStatementPosition(int pos);
+  void WriteRecordedPositions();
+
+  int32_t pc_offset() const { return pc_ - buffer_; }
+  int32_t current_position() const { return current_position_; }
+  int32_t current_statement_position() const {
+    return current_statement_position_;
+  }
+
+  // Check if there is less than kGap bytes available in the buffer.
+  // If this is the case, we need to grow the buffer before emitting
+  // an instruction or relocation information.
+  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
+
+  // Get the number of bytes available in the buffer.
+  inline int available_space() const { return reloc_info_writer.pos() - pc_; }
+
+ protected:
+  int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
+
+  // Read/patch instructions.
+  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
+  void instr_at_put(byte* pc, Instr instr) {
+    *reinterpret_cast<Instr*>(pc) = instr;
+  }
+  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+  void instr_at_put(int pos, Instr instr) {
+    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+  }
+
+  // Check if an instruction is a branch of some kind.
+  bool is_branch(Instr instr);
+
+  // Decode branch instruction at pos and return branch target pos.
+  int target_at(int32_t pos);
+
+  // Patch branch instruction at pos to branch to given branch target pos.
+  void target_at_put(int32_t pos, int32_t target_pos);
+
+  // Returns true if relocation information must be emitted for this mode.
+  bool MustUseAt(RelocInfo::Mode rmode);
+
+  // Record reloc info for current pc_.
+  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+ private:
+  // Code buffer:
+  // The buffer into which code and relocation info are generated.
+  byte* buffer_;
+  int buffer_size_;
+  // True if the assembler owns the buffer, false if buffer is external.
+  bool own_buffer_;
+
+  // Buffer size and constant pool distance are checked together at regular
+  // intervals of kBufferCheckInterval emitted bytes.
+  static const int kBufferCheckInterval = 1*KB/2;
+
+  // Code generation.
+  // The relocation writer's position is at least kGap bytes below the end of
+  // the generated instructions. This is so that multi-instruction sequences do
+  // not have to check for overflow. The same is true for writes of large
+  // relocation info entries.
+  static const int kGap = 32;
+  byte* pc_;  // The program counter - moves forward.
+
+  // Relocation information generation.
+  // Each relocation is encoded as a variable size value.
+  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+  RelocInfoWriter reloc_info_writer;
+
+  // The bound position, before this we cannot do instruction elimination.
+  int last_bound_pos_;
+
+  // Source position information.
+  int current_position_;
+  int current_statement_position_;
+  int written_position_;
+  int written_statement_position_;
+
+  // Code emission.
+  inline void CheckBuffer();
+  void GrowBuffer();
+  inline void emit(Instr x);
+
+  // Instruction generation.
+  // There are three different kinds of encoding layouts on MIPS. Because many
+  // different types of objects are encoded in the same fields, we keep a few
+  // overloads for each layout. Using the same structure to refer to Register
+  // and FPURegister would spare some overloads, but mixing the two would be
+  // less clear.
+
+  void GenInstrRegister(Opcode opcode,
+                        Register rs,
+                        Register rt,
+                        Register rd,
+                        uint16_t sa = 0,
+                        SecondaryField func = NULLSF);
+
+  void GenInstrRegister(Opcode opcode,
+                        SecondaryField fmt,
+                        FPURegister ft,
+                        FPURegister fs,
+                        FPURegister fd,
+                        SecondaryField func = NULLSF);
+
+  void GenInstrRegister(Opcode opcode,
+                        SecondaryField fmt,
+                        Register rt,
+                        FPURegister fs,
+                        FPURegister fd,
+                        SecondaryField func = NULLSF);
+
+
+  void GenInstrImmediate(Opcode opcode,
+                         Register rs,
+                         Register rt,
+                         int32_t  j);
+  void GenInstrImmediate(Opcode opcode,
+                         Register rs,
+                         SecondaryField SF,
+                         int32_t  j);
+  void GenInstrImmediate(Opcode opcode,
+                         Register r1,
+                         FPURegister r2,
+                         int32_t  j);
+
+
+  void GenInstrJump(Opcode opcode,
+                     uint32_t address);
+
+
+  // Labels.
+  void print(Label* L);
+  void bind_to(Label* L, int pos);
+  void link_to(Label* L, Label* appendix);
+  void next(Label* L);
+
+  friend class RegExpMacroAssemblerMIPS;
+  friend class RelocInfo;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_ARM_ASSEMBLER_MIPS_H_
+
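The label and branch interface declared above follows the usage pattern sketched in its comment block: a Label starts out unbound, branches to it are linked into a fixup chain, and bind() patches that chain. A minimal, hypothetical sketch of how code inside a generator function with a MacroAssembler* masm (and the ACCESS_MASM macro used throughout this patch) would drive it; the register choice and loop body are illustrative only:

#define __ ACCESS_MASM(masm)

  // Count a0 down to zero using a backward branch to a bound label.
  Label loop;
  __ bind(&loop);                // Bind the label to the current pc.
  __ addiu(a0, a0, -1);          // Decrement the counter.
  __ bne(a0, zero_reg, &loop);   // Backward branch while a0 != 0.
  __ nop();                      // Fill the branch delay slot.

#undef __

The Label* overloads (b, bal, beq, bne, bc1f, bc1t) shift branch_offset() right by two, matching the word-aligned 16-bit offsets the branch encodings expect.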
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
new file mode 100644
index 0000000..3bd42ed
--- /dev/null
+++ b/src/mips/builtins-mips.cc
@@ -0,0 +1,109 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "debug.h"
+#include "runtime.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+                                CFunctionId id,
+                                BuiltinExtraArguments extra_args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+                                             bool is_construct) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/codegen-mips-inl.h b/src/mips/codegen-mips-inl.h
new file mode 100644
index 0000000..2a77715
--- /dev/null
+++ b/src/mips/codegen-mips-inl.h
@@ -0,0 +1,56 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_MIPS_CODEGEN_MIPS_INL_H_
+#define V8_MIPS_CODEGEN_MIPS_INL_H_
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Platform-specific inline functions.
+
+void DeferredCode::Jump() { __ b(&entry_label_); }
+
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_CODEGEN_MIPS_INL_H_
+
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
new file mode 100644
index 0000000..5a27c28
--- /dev/null
+++ b/src/mips/codegen-mips.cc
@@ -0,0 +1,501 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "parser.h"
+#include "register-allocator-inl.h"
+#include "runtime.h"
+#include "scopes.h"
+#include "compiler.h"
+
+
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+
+
+// -------------------------------------------------------------------------
+// Platform-specific DeferredCode functions.
+
+
+void DeferredCode::SaveRegisters() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void DeferredCode::RestoreRegisters() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// -------------------------------------------------------------------------
+// CodeGenerator implementation
+
+CodeGenerator::CodeGenerator(MacroAssembler* masm)
+    : deferred_(8),
+      masm_(masm),
+      scope_(NULL),
+      frame_(NULL),
+      allocator_(NULL),
+      cc_reg_(cc_always),
+      state_(NULL),
+      function_return_is_shadowed_(false) {
+}
+
+
+// Calling conventions:
+// s8_fp: caller's frame pointer
+// sp: stack pointer
+// a1: called JS function
+// cp: callee's context
+
+void CodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitBlock(Block* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitDeclaration(Declaration* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitIfStatement(IfStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitForStatement(ForStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitForInStatement(ForInStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitConditional(Conditional* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitSlot(Slot* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitLiteral(Literal* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitAssignment(Assignment* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitThrow(Throw* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitProperty(Property* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitCall(Call* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitCallNew(CallNew* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// This should generate code that performs a charCodeAt() call or returns
+// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
+// It is not yet implemented on MIPS, so it always goes to the slow case.
+void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitCountOperation(CountOperation* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitThisFunction(ThisFunction* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+#ifdef DEBUG
+bool CodeGenerator::HasValidEntryRegisters() { return true; }
+#endif
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+// On entry a0 and a1 hold the values to be compared.  On exit v0 is zero,
+// positive, or negative to indicate the result of the comparison.
+void CompareStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x765);
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x790);
+}
+
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x808);
+}
+
+
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+                                          UncatchableExceptionType type) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x815);
+}
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+                              Label* throw_normal_exception,
+                              Label* throw_termination_exception,
+                              Label* throw_out_of_memory_exception,
+                              bool do_gc,
+                              bool always_allocate) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x826);
+}
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x831);
+}
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+  UNIMPLEMENTED_MIPS();
+  // Load a placeholder result and return.
+  __ li(v0, Operand(0x1234));
+  __ jr(ra);
+  __ nop();  // Branch delay slot.
+}
+
+
+// This stub performs an instanceof, calling the builtin function if
+// necessary.  Uses a1 for the object, a0 for the function that it may
+// be an instance of (these are fetched from the stack).
+void InstanceofStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x845);
+}
+
+
+void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x851);
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x857);
+}
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x863);
+}
+
+
+const char* CompareStub::GetName() {
+  UNIMPLEMENTED_MIPS();
+  return NULL;  // UNIMPLEMENTED RETURN
+}
+
+
+int CompareStub::MinorKey() {
+  // Encode the two parameters in a unique 16-bit value.
+  ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15));
+  return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0);
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
+
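The CompareStub contract documented above (operands in a0 and a1, result sign in v0) can be consumed with the branch instructions declared in assembler-mips.h. A hypothetical sketch of a caller testing the result after the stub returns, assuming the usual ACCESS_MASM setup; the stub invocation itself is elided, since the MIPS stub body is still unimplemented in this patch:

  // v0 holds the comparison result: zero means equal, the sign gives the order.
  Label not_equal;
  __ bne(v0, zero_reg, &not_equal);  // Non-zero result: operands differ.
  // ... code for the equal case ...
  __ bind(&not_equal);
  // ... code for the not-equal case ...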
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
new file mode 100644
index 0000000..05138bc
--- /dev/null
+++ b/src/mips/codegen-mips.h
@@ -0,0 +1,311 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_MIPS_CODEGEN_MIPS_H_
+#define V8_MIPS_CODEGEN_MIPS_H_
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations
+class CompilationInfo;
+class DeferredCode;
+class RegisterAllocator;
+class RegisterFile;
+
+enum InitState { CONST_INIT, NOT_CONST_INIT };
+enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+
+
+// -------------------------------------------------------------------------
+// Code generation state
+
+// The state is passed down the AST by the code generator (and back up, in
+// the form of the state of the label pair).  It is threaded through the
+// call stack.  Constructing a state implicitly pushes it on the owning code
+// generator's stack of states, and destroying one implicitly pops it.
+
+class CodeGenState BASE_EMBEDDED {
+ public:
+  // Create an initial code generator state.  Destroying the initial state
+  // leaves the code generator with a NULL state.
+  explicit CodeGenState(CodeGenerator* owner);
+
+  // Create a code generator state based on a code generator's current
+  // state.  The new state has its own typeof state and pair of branch
+  // labels.
+  CodeGenState(CodeGenerator* owner,
+               JumpTarget* true_target,
+               JumpTarget* false_target);
+
+  // Destroy a code generator state and restore the owning code generator's
+  // previous state.
+  ~CodeGenState();
+
+  TypeofState typeof_state() const { return typeof_state_; }
+  JumpTarget* true_target() const { return true_target_; }
+  JumpTarget* false_target() const { return false_target_; }
+
+ private:
+  // The owning code generator.
+  CodeGenerator* owner_;
+
+  // A flag indicating whether we are compiling the immediate subexpression
+  // of a typeof expression.
+  TypeofState typeof_state_;
+
+  JumpTarget* true_target_;
+  JumpTarget* false_target_;
+
+  // The previous state of the owning code generator, restored when
+  // this state is destroyed.
+  CodeGenState* previous_;
+};
+
+
+
+// -------------------------------------------------------------------------
+// CodeGenerator
+
+class CodeGenerator: public AstVisitor {
+ public:
+  // Compilation mode.  Either the compiler is used as the primary
+  // compiler and needs to set up everything, or it is used as the
+  // secondary compiler for split compilation and has to handle
+  // bailouts.
+  enum Mode {
+    PRIMARY,
+    SECONDARY
+  };
+
+  // Takes a function literal, generates code for it. This function should only
+  // be called by compiler.cc.
+  static Handle<Code> MakeCode(CompilationInfo* info);
+
+  // Printing of AST, etc. as requested by flags.
+  static void MakeCodePrologue(CompilationInfo* info);
+
+  // Allocate and install the code.
+  static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
+                                       Code::Flags flags,
+                                       CompilationInfo* info);
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  static bool ShouldGenerateLog(Expression* type);
+#endif
+
+  static void SetFunctionInfo(Handle<JSFunction> fun,
+                              FunctionLiteral* lit,
+                              bool is_toplevel,
+                              Handle<Script> script);
+
+  static void RecordPositions(MacroAssembler* masm, int pos);
+
+  // Accessors
+  MacroAssembler* masm() { return masm_; }
+  VirtualFrame* frame() const { return frame_; }
+  inline Handle<Script> script();
+
+  bool has_valid_frame() const { return frame_ != NULL; }
+
+  // Set the virtual frame to be new_frame, with non-frame register
+  // reference counts given by non_frame_registers.  The non-frame
+  // register reference counts of the old frame are returned in
+  // non_frame_registers.
+  void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
+
+  void DeleteFrame();
+
+  RegisterAllocator* allocator() const { return allocator_; }
+
+  CodeGenState* state() { return state_; }
+  void set_state(CodeGenState* state) { state_ = state; }
+
+  void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
+
+  static const int kUnknownIntValue = -1;
+
+  // Number of instructions used for the JS return sequence. The constant is
+  // used by the debugger to patch the JS return sequence.
+  static const int kJSReturnSequenceLength = 6;
+
+ private:
+  // Construction/Destruction.
+  explicit CodeGenerator(MacroAssembler* masm);
+  virtual ~CodeGenerator() { delete masm_; }
+
+  // Accessors.
+  inline bool is_eval();
+  Scope* scope() const { return scope_; }
+
+  // Generating deferred code.
+  void ProcessDeferred();
+
+  // State
+  bool has_cc() const  { return cc_reg_ != cc_always; }
+  TypeofState typeof_state() const { return state_->typeof_state(); }
+  JumpTarget* true_target() const  { return state_->true_target(); }
+  JumpTarget* false_target() const  { return state_->false_target(); }
+
+  // We do not track the loop nesting level on MIPS yet.
+  int loop_nesting() const { return 0; }
+
+  // Node visitors.
+  void VisitStatements(ZoneList<Statement*>* statements);
+
+#define DEF_VISIT(type) \
+  void Visit##type(type* node);
+  AST_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+  // Main code generation function
+  void Generate(CompilationInfo* info, Mode mode);
+
+  struct InlineRuntimeLUT {
+    void (CodeGenerator::*method)(ZoneList<Expression*>*);
+    const char* name;
+  };
+
+  static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
+  bool CheckForInlineRuntimeCall(CallRuntime* node);
+  static bool PatchInlineRuntimeEntry(Handle<String> name,
+                                      const InlineRuntimeLUT& new_entry,
+                                      InlineRuntimeLUT* old_entry);
+
+  static Handle<Code> ComputeLazyCompile(int argc);
+  void ProcessDeclarations(ZoneList<Declaration*>* declarations);
+
+  Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+
+  // Declare global variables and functions in the given array of
+  // name/value pairs.
+  void DeclareGlobals(Handle<FixedArray> pairs);
+
+  // Support for type checks.
+  void GenerateIsSmi(ZoneList<Expression*>* args);
+  void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
+  void GenerateIsArray(ZoneList<Expression*>* args);
+
+  // Support for construct call checks.
+  void GenerateIsConstructCall(ZoneList<Expression*>* args);
+
+  // Support for arguments.length and arguments[?].
+  void GenerateArgumentsLength(ZoneList<Expression*>* args);
+  void GenerateArgumentsAccess(ZoneList<Expression*>* args);
+
+  // Support for accessing the class and value fields of an object.
+  void GenerateClassOf(ZoneList<Expression*>* args);
+  void GenerateValueOf(ZoneList<Expression*>* args);
+  void GenerateSetValueOf(ZoneList<Expression*>* args);
+
+  // Fast support for charCodeAt(n).
+  void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
+
+  // Fast support for object equality testing.
+  void GenerateObjectEquals(ZoneList<Expression*>* args);
+
+  void GenerateLog(ZoneList<Expression*>* args);
+
+  // Fast support for Math.random().
+  void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
+
+  void GenerateIsObject(ZoneList<Expression*>* args);
+  void GenerateIsFunction(ZoneList<Expression*>* args);
+  void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
+  void GenerateStringAdd(ZoneList<Expression*>* args);
+  void GenerateSubString(ZoneList<Expression*>* args);
+  void GenerateStringCompare(ZoneList<Expression*>* args);
+  void GenerateRegExpExec(ZoneList<Expression*>* args);
+
+  // Fast support for Math.sin and Math.cos.
+  inline void GenerateMathSin(ZoneList<Expression*>* args);
+  inline void GenerateMathCos(ZoneList<Expression*>* args);
+
+  // Simple condition analysis.
+  enum ConditionAnalysis {
+    ALWAYS_TRUE,
+    ALWAYS_FALSE,
+    DONT_KNOW
+  };
+  ConditionAnalysis AnalyzeCondition(Expression* cond);
+
+  // Methods used to indicate which source code the generated code is for.
+  // Source positions are collected by the assembler and emitted with the
+  // relocation information.
+  void CodeForFunctionPosition(FunctionLiteral* fun);
+  void CodeForReturnPosition(FunctionLiteral* fun);
+  void CodeForStatementPosition(Statement* node);
+  void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
+  void CodeForSourcePosition(int pos);
+
+#ifdef DEBUG
+  // True if the registers are valid for entry to a block.
+  bool HasValidEntryRegisters();
+#endif
+
+  bool is_eval_;  // Tells whether code is generated for eval.
+
+  Handle<Script> script_;
+  List<DeferredCode*> deferred_;
+
+  // Assembler
+  MacroAssembler* masm_;  // to generate code
+
+  CompilationInfo* info_;
+
+  // Code generation state
+  Scope* scope_;
+  VirtualFrame* frame_;
+  RegisterAllocator* allocator_;
+  Condition cc_reg_;
+  CodeGenState* state_;
+
+  // Jump targets
+  BreakTarget function_return_;
+
+  // True if the function return is shadowed (i.e., jumping to the target
+  // function_return_ does not jump to the true function return, but rather
+  // to some unlinking code).
+  bool function_return_is_shadowed_;
+
+  static InlineRuntimeLUT kInlineRuntimeLUT[];
+
+  friend class VirtualFrame;
+  friend class JumpTarget;
+  friend class Reference;
+  friend class FastCodeGenerator;
+  friend class FullCodeGenSyntaxChecker;
+
+  DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_CODEGEN_MIPS_H_
+
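CodeGenState follows the RAII convention described in its comment: constructing one pushes it onto the owning code generator's state stack and destroying it pops back to the previous state. A hypothetical sketch of how a CodeGenerator visitor method would thread branch targets through a subexpression using this class (the JumpTarget names are illustrative):

  {
    JumpTarget true_target;
    JumpTarget false_target;
    CodeGenState new_state(this, &true_target, &false_target);
    // Visiting the condition here observes new_state through state(),
    // true_target(), and false_target().
  }  // ~CodeGenState() restores the previous state on the code generator.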
diff --git a/src/mips/constants-mips.cc b/src/mips/constants-mips.cc
new file mode 100644
index 0000000..a5ef9f8
--- /dev/null
+++ b/src/mips/constants-mips.cc
@@ -0,0 +1,323 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+#include "constants-mips.h"
+
+namespace assembler {
+namespace mips {
+
+namespace v8i = v8::internal;
+
+
+// -----------------------------------------------------------------------------
+// Registers
+
+
+// These register names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+const char* Registers::names_[kNumSimuRegisters] = {
+  "zero_reg",
+  "at",
+  "v0", "v1",
+  "a0", "a1", "a2", "a3",
+  "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
+  "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+  "t8", "t9",
+  "k0", "k1",
+  "gp",
+  "sp",
+  "fp",
+  "ra",
+  "LO", "HI",
+  "pc"
+};
+
+// List of alias names which can be used when referring to MIPS registers.
+const Registers::RegisterAlias Registers::aliases_[] = {
+  {0, "zero"},
+  {23, "cp"},
+  {30, "s8"},
+  {30, "s8_fp"},
+  {kInvalidRegister, NULL}
+};
+
+const char* Registers::Name(int reg) {
+  const char* result;
+  if ((0 <= reg) && (reg < kNumSimuRegisters)) {
+    result = names_[reg];
+  } else {
+    result = "noreg";
+  }
+  return result;
+}
+
+
+int Registers::Number(const char* name) {
+  // Look through the canonical names.
+  for (int i = 0; i < kNumSimuRegisters; i++) {
+    if (strcmp(names_[i], name) == 0) {
+      return i;
+    }
+  }
+
+  // Look through the alias names.
+  int i = 0;
+  while (aliases_[i].reg != kInvalidRegister) {
+    if (strcmp(aliases_[i].name, name) == 0) {
+      return aliases_[i].reg;
+    }
+    i++;
+  }
+
+  // No register with the requested name found.
+  return kInvalidRegister;
+}
+
+
+const char* FPURegister::names_[kNumFPURegister] = {
+  "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11",
+  "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
+  "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
+};
+
+// List of alias names which can be used when referring to MIPS FPU registers.
+const FPURegister::RegisterAlias FPURegister::aliases_[] = {
+  {kInvalidRegister, NULL}
+};
+
+const char* FPURegister::Name(int creg) {
+  const char* result;
+  if ((0 <= creg) && (creg < kNumFPURegister)) {
+    result = names_[creg];
+  } else {
+    result = "nocreg";
+  }
+  return result;
+}
+
+
+int FPURegister::Number(const char* name) {
+  // Look through the canonical names.
+  for (int i = 0; i < kNumFPURegister; i++) {
+    if (strcmp(names_[i], name) == 0) {
+      return i;
+    }
+  }
+
+  // Look through the alias names.
+  int i = 0;
+  while (aliases_[i].creg != kInvalidRegister) {
+    if (strcmp(aliases_[i].name, name) == 0) {
+      return aliases_[i].creg;
+    }
+    i++;
+  }
+
+  // No FPU register with the requested name found.
+  return kInvalidFPURegister;
+}
+
+
+// -----------------------------------------------------------------------------
+// Instruction
+
+bool Instruction::IsForbiddenInBranchDelay() {
+  int op = OpcodeFieldRaw();
+  switch (op) {
+    case J:
+    case JAL:
+    case BEQ:
+    case BNE:
+    case BLEZ:
+    case BGTZ:
+    case BEQL:
+    case BNEL:
+    case BLEZL:
+    case BGTZL:
+      return true;
+    case REGIMM:
+      switch (RtFieldRaw()) {
+        case BLTZ:
+        case BGEZ:
+        case BLTZAL:
+        case BGEZAL:
+          return true;
+        default:
+          return false;
+      };
+      break;
+    case SPECIAL:
+      switch (FunctionFieldRaw()) {
+        case JR:
+        case JALR:
+          return true;
+        default:
+          return false;
+      };
+      break;
+    default:
+      return false;
+  };
+}
+
+
+bool Instruction::IsLinkingInstruction() {
+  switch (OpcodeFieldRaw()) {
+    case JAL:
+      return true;
+    case REGIMM:
+      // BGEZAL and BLTZAL are encoded in the rt field of REGIMM, not in the
+      // opcode field, so they must be checked separately.
+      switch (RtFieldRaw()) {
+        case BGEZAL:
+        case BLTZAL:
+          return true;
+        default:
+          return false;
+      };
+    case SPECIAL:
+      switch (FunctionFieldRaw()) {
+        case JALR:
+          return true;
+        default:
+          return false;
+      };
+    default:
+      return false;
+  };
+}
+
+
+bool Instruction::IsTrap() {
+  if (OpcodeFieldRaw() != SPECIAL) {
+    return false;
+  } else {
+    switch (FunctionFieldRaw()) {
+      case BREAK:
+      case TGE:
+      case TGEU:
+      case TLT:
+      case TLTU:
+      case TEQ:
+      case TNE:
+        return true;
+      default:
+        return false;
+    };
+  }
+}
+
+
+Instruction::Type Instruction::InstructionType() const {
+  switch (OpcodeFieldRaw()) {
+    case SPECIAL:
+      switch (FunctionFieldRaw()) {
+        case JR:
+        case JALR:
+        case BREAK:
+        case SLL:
+        case SRL:
+        case SRA:
+        case SLLV:
+        case SRLV:
+        case SRAV:
+        case MFHI:
+        case MFLO:
+        case MULT:
+        case MULTU:
+        case DIV:
+        case DIVU:
+        case ADD:
+        case ADDU:
+        case SUB:
+        case SUBU:
+        case AND:
+        case OR:
+        case XOR:
+        case NOR:
+        case SLT:
+        case SLTU:
+        case TGE:
+        case TGEU:
+        case TLT:
+        case TLTU:
+        case TEQ:
+        case TNE:
+          return kRegisterType;
+        default:
+          UNREACHABLE();
+      };
+      break;
+    case SPECIAL2:
+      switch (FunctionFieldRaw()) {
+        case MUL:
+          return kRegisterType;
+        default:
+          UNREACHABLE();
+      };
+      break;
+    case COP1:    // Coprocessor instructions
+      switch (FunctionFieldRaw()) {
+        case BC1:   // branch on coprocessor condition
+          return kImmediateType;
+        default:
+          return kRegisterType;
+      };
+      break;
+    // 16-bit immediate type instructions, e.g. addi dest, src, imm16.
+    case REGIMM:
+    case BEQ:
+    case BNE:
+    case BLEZ:
+    case BGTZ:
+    case ADDI:
+    case ADDIU:
+    case SLTI:
+    case SLTIU:
+    case ANDI:
+    case ORI:
+    case XORI:
+    case LUI:
+    case BEQL:
+    case BNEL:
+    case BLEZL:
+    case BGTZL:
+    case LB:
+    case LW:
+    case LBU:
+    case SB:
+    case SW:
+    case LWC1:
+    case LDC1:
+    case SWC1:
+    case SDC1:
+      return kImmediateType;
+    // 26-bit immediate type instructions, e.g. j imm26.
+    case J:
+    case JAL:
+      return kJumpType;
+    default:
+      UNREACHABLE();
+  };
+  return kUnsupported;
+}
+
+} }   // namespace assembler::mips
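Registers::Number() above first scans the canonical names_ table and then falls back to the aliases_ table, so both the disassembler-style names and the alias names resolve to the same register index. A hypothetical usage sketch; the expected values follow directly from the tables defined above:

  // Canonical name: "a0" is names_[4].
  int a0_index = assembler::mips::Registers::Number("a0");     // 4
  // Alias: "s8_fp" maps to register 30 via aliases_.
  int fp_index = assembler::mips::Registers::Number("s8_fp");  // 30
  // Unknown names yield kInvalidRegister.
  int bad = assembler::mips::Registers::Number("nosuchreg");   // -1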
diff --git a/src/mips/constants-mips.h b/src/mips/constants-mips.h
new file mode 100644
index 0000000..d0fdf88
--- /dev/null
+++ b/src/mips/constants-mips.h
@@ -0,0 +1,525 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef  V8_MIPS_CONSTANTS_H_
+#define  V8_MIPS_CONSTANTS_H_
+
+#include "checks.h"
+
+// UNIMPLEMENTED_ macro for MIPS.
+#define UNIMPLEMENTED_MIPS()                                                  \
+  v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n",    \
+                       __FILE__, __LINE__, __func__)
+#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
+
+
+// Defines constants and accessor classes to assemble, disassemble and
+// simulate MIPS32 instructions.
+//
+// See: MIPS32 Architecture For Programmers
+//      Volume II: The MIPS32 Instruction Set
+// Try www.cs.cornell.edu/courses/cs3410/2008fa/MIPS_Vol2.pdf.
+
+namespace assembler {
+namespace mips {
+
+// -----------------------------------------------------------------------------
+// Registers and FPURegister.
+
+// Number of general purpose registers.
+static const int kNumRegisters = 32;
+static const int kInvalidRegister = -1;
+
+// Number of simulated registers (the 32 GPRs plus LO, HI, and pc).
+static const int kNumSimuRegisters = 35;
+
+// In the simulator, the PC is simulated as register index 34.
+static const int kPCRegister = 34;
+
+// Number of coprocessor (FPU) registers.
+static const int kNumFPURegister = 32;
+static const int kInvalidFPURegister = -1;
+
+// Helper functions for converting between register numbers and names.
+class Registers {
+ public:
+  // Return the name of the register.
+  static const char* Name(int reg);
+
+  // Lookup the register number for the name provided.
+  static int Number(const char* name);
+
+  struct RegisterAlias {
+    int reg;
+    const char *name;
+  };
+
+  static const int32_t kMaxValue = 0x7fffffff;
+  static const int32_t kMinValue = 0x80000000;
+
+ private:
+
+  static const char* names_[kNumSimuRegisters];
+  static const RegisterAlias aliases_[];
+};
+
+// Helper functions for converting between register numbers and names.
+class FPURegister {
+ public:
+  // Return the name of the register.
+  static const char* Name(int reg);
+
+  // Lookup the register number for the name provided.
+  static int Number(const char* name);
+
+  struct RegisterAlias {
+    int creg;
+    const char *name;
+  };
+
+ private:
+
+  static const char* names_[kNumFPURegister];
+  static const RegisterAlias aliases_[];
+};
+
+
+// -----------------------------------------------------------------------------
+// Instructions encoding constants.
+
+// On MIPS all instructions are 32 bits.
+typedef int32_t Instr;
+
+typedef unsigned char byte_;
+
+// Special Software Interrupt codes when used in the presence of the MIPS
+// simulator.
+enum SoftwareInterruptCodes {
+  // Transition to C code.
+  call_rt_redirected = 0xfffff
+};
+
+// ----- Field offsets and widths.
+static const int kOpcodeShift   = 26;
+static const int kOpcodeBits    = 6;
+static const int kRsShift       = 21;
+static const int kRsBits        = 5;
+static const int kRtShift       = 16;
+static const int kRtBits        = 5;
+static const int kRdShift       = 11;
+static const int kRdBits        = 5;
+static const int kSaShift       = 6;
+static const int kSaBits        = 5;
+static const int kFunctionShift = 0;
+static const int kFunctionBits  = 6;
+
+static const int kImm16Shift = 0;
+static const int kImm16Bits  = 16;
+static const int kImm26Shift = 0;
+static const int kImm26Bits  = 26;
+
+static const int kFsShift       = 11;
+static const int kFsBits        = 5;
+static const int kFtShift       = 16;
+static const int kFtBits        = 5;
+
+// ----- Miscellaneous useful masks.
+// Instruction bit masks.
+static const int  kOpcodeMask   = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
+static const int  kImm16Mask    = ((1 << kImm16Bits) - 1) << kImm16Shift;
+static const int  kImm26Mask    = ((1 << kImm26Bits) - 1) << kImm26Shift;
+static const int  kRsFieldMask  = ((1 << kRsBits) - 1) << kRsShift;
+static const int  kRtFieldMask  = ((1 << kRtBits) - 1) << kRtShift;
+static const int  kRdFieldMask  = ((1 << kRdBits) - 1) << kRdShift;
+static const int  kSaFieldMask  = ((1 << kSaBits) - 1) << kSaShift;
+static const int  kFunctionFieldMask =
+    ((1 << kFunctionBits) - 1) << kFunctionShift;
+// Misc masks.
+static const int  HIMask        =   0xffff << 16;
+static const int  LOMask        =   0xffff;
+static const int  signMask      =   0x80000000;
+
+
+// ----- MIPS Opcodes and Function Fields.
+// We use this presentation to stay close to the table representation in
+// MIPS32 Architecture For Programmers, Volume II: The MIPS32 Instruction Set.
+enum Opcode {
+  SPECIAL   =   0 << kOpcodeShift,
+  REGIMM    =   1 << kOpcodeShift,
+
+  J         =   ((0 << 3) + 2) << kOpcodeShift,
+  JAL       =   ((0 << 3) + 3) << kOpcodeShift,
+  BEQ       =   ((0 << 3) + 4) << kOpcodeShift,
+  BNE       =   ((0 << 3) + 5) << kOpcodeShift,
+  BLEZ      =   ((0 << 3) + 6) << kOpcodeShift,
+  BGTZ      =   ((0 << 3) + 7) << kOpcodeShift,
+
+  ADDI      =   ((1 << 3) + 0) << kOpcodeShift,
+  ADDIU     =   ((1 << 3) + 1) << kOpcodeShift,
+  SLTI      =   ((1 << 3) + 2) << kOpcodeShift,
+  SLTIU     =   ((1 << 3) + 3) << kOpcodeShift,
+  ANDI      =   ((1 << 3) + 4) << kOpcodeShift,
+  ORI       =   ((1 << 3) + 5) << kOpcodeShift,
+  XORI      =   ((1 << 3) + 6) << kOpcodeShift,
+  LUI       =   ((1 << 3) + 7) << kOpcodeShift,
+
+  COP1      =   ((2 << 3) + 1) << kOpcodeShift,  // Coprocessor 1 class
+  BEQL      =   ((2 << 3) + 4) << kOpcodeShift,
+  BNEL      =   ((2 << 3) + 5) << kOpcodeShift,
+  BLEZL     =   ((2 << 3) + 6) << kOpcodeShift,
+  BGTZL     =   ((2 << 3) + 7) << kOpcodeShift,
+
+  SPECIAL2  =   ((3 << 3) + 4) << kOpcodeShift,
+
+  LB        =   ((4 << 3) + 0) << kOpcodeShift,
+  LW        =   ((4 << 3) + 3) << kOpcodeShift,
+  LBU       =   ((4 << 3) + 4) << kOpcodeShift,
+  SB        =   ((5 << 3) + 0) << kOpcodeShift,
+  SW        =   ((5 << 3) + 3) << kOpcodeShift,
+
+  LWC1      =   ((6 << 3) + 1) << kOpcodeShift,
+  LDC1      =   ((6 << 3) + 5) << kOpcodeShift,
+
+  SWC1      =   ((7 << 3) + 1) << kOpcodeShift,
+  SDC1      =   ((7 << 3) + 5) << kOpcodeShift
+};
+
+enum SecondaryField {
+  // SPECIAL Encoding of Function Field.
+  SLL       =   ((0 << 3) + 0),
+  SRL       =   ((0 << 3) + 2),
+  SRA       =   ((0 << 3) + 3),
+  SLLV      =   ((0 << 3) + 4),
+  SRLV      =   ((0 << 3) + 6),
+  SRAV      =   ((0 << 3) + 7),
+
+  JR        =   ((1 << 3) + 0),
+  JALR      =   ((1 << 3) + 1),
+  BREAK     =   ((1 << 3) + 5),
+
+  MFHI      =   ((2 << 3) + 0),
+  MFLO      =   ((2 << 3) + 2),
+
+  MULT      =   ((3 << 3) + 0),
+  MULTU     =   ((3 << 3) + 1),
+  DIV       =   ((3 << 3) + 2),
+  DIVU      =   ((3 << 3) + 3),
+
+  ADD       =   ((4 << 3) + 0),
+  ADDU      =   ((4 << 3) + 1),
+  SUB       =   ((4 << 3) + 2),
+  SUBU      =   ((4 << 3) + 3),
+  AND       =   ((4 << 3) + 4),
+  OR        =   ((4 << 3) + 5),
+  XOR       =   ((4 << 3) + 6),
+  NOR       =   ((4 << 3) + 7),
+
+  SLT       =   ((5 << 3) + 2),
+  SLTU      =   ((5 << 3) + 3),
+
+  TGE       =   ((6 << 3) + 0),
+  TGEU      =   ((6 << 3) + 1),
+  TLT       =   ((6 << 3) + 2),
+  TLTU      =   ((6 << 3) + 3),
+  TEQ       =   ((6 << 3) + 4),
+  TNE       =   ((6 << 3) + 6),
+
+  // SPECIAL2 Encoding of Function Field.
+  MUL       =   ((0 << 3) + 2),
+
+  // REGIMM  encoding of rt Field.
+  BLTZ      =   ((0 << 3) + 0) << 16,
+  BGEZ      =   ((0 << 3) + 1) << 16,
+  BLTZAL    =   ((2 << 3) + 0) << 16,
+  BGEZAL    =   ((2 << 3) + 1) << 16,
+
+  // COP1 Encoding of rs Field.
+  MFC1      =   ((0 << 3) + 0) << 21,
+  MFHC1     =   ((0 << 3) + 3) << 21,
+  MTC1      =   ((0 << 3) + 4) << 21,
+  MTHC1     =   ((0 << 3) + 7) << 21,
+  BC1       =   ((1 << 3) + 0) << 21,
+  S         =   ((2 << 3) + 0) << 21,
+  D         =   ((2 << 3) + 1) << 21,
+  W         =   ((2 << 3) + 4) << 21,
+  L         =   ((2 << 3) + 5) << 21,
+  PS        =   ((2 << 3) + 6) << 21,
+  // COP1 Encoding of Function Field When rs=S.
+  CVT_D_S   =   ((4 << 3) + 1),
+  CVT_W_S   =   ((4 << 3) + 4),
+  CVT_L_S   =   ((4 << 3) + 5),
+  CVT_PS_S  =   ((4 << 3) + 6),
+  // COP1 Encoding of Function Field When rs=D.
+  CVT_S_D   =   ((4 << 3) + 0),
+  CVT_W_D   =   ((4 << 3) + 4),
+  CVT_L_D   =   ((4 << 3) + 5),
+  // COP1 Encoding of Function Field When rs=W or L.
+  CVT_S_W   =   ((4 << 3) + 0),
+  CVT_D_W   =   ((4 << 3) + 1),
+  CVT_S_L   =   ((4 << 3) + 0),
+  CVT_D_L   =   ((4 << 3) + 1),
+  // COP1 Encoding of Function Field When rs=PS.
+
+  NULLSF    =   0
+};
+
+
+// ----- Emulated conditions.
+// On MIPS we use this enum to abstract from conditional branch instructions.
+// The 'U' prefix is used to specify unsigned comparisons.
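+// For example, Uless is the unsigned counterpart of less and doubles as the
+// carry alias defined at the end of the enum.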
+enum Condition {
+  // Any value < 0 is considered no_condition.
+  no_condition  = -1,
+
+  overflow      =  0,
+  no_overflow   =  1,
+  Uless         =  2,
+  Ugreater_equal=  3,
+  equal         =  4,
+  not_equal     =  5,
+  Uless_equal   =  6,
+  Ugreater      =  7,
+  negative      =  8,
+  positive      =  9,
+  parity_even   = 10,
+  parity_odd    = 11,
+  less          = 12,
+  greater_equal = 13,
+  less_equal    = 14,
+  greater       = 15,
+
+  cc_always     = 16,
+
+  // aliases
+  carry         = Uless,
+  not_carry     = Ugreater_equal,
+  zero          = equal,
+  eq            = equal,
+  not_zero      = not_equal,
+  ne            = not_equal,
+  sign          = negative,
+  not_sign      = positive,
+
+  cc_default    = no_condition
+};
+
+// ----- Coprocessor conditions.
+enum FPUCondition {
+  F,    // False
+  UN,   // Unordered
+  EQ,   // Equal
+  UEQ,  // Unordered or Equal
+  OLT,  // Ordered or Less Than
+  ULT,  // Unordered or Less Than
+  OLE,  // Ordered or Less Than or Equal
+  ULE   // Unordered or Less Than or Equal
+};
+
+
+// Break 0xfffff, reserved for redirected real time call.
+const Instr rtCallRedirInstr = SPECIAL | BREAK | call_rt_redirected << 6;
+// A nop instruction. (Encoding of sll 0 0 0).
+const Instr nopInstr = 0;
+
+class Instruction {
+ public:
+  enum {
+    kInstructionSize = 4,
+    kInstructionSizeLog2 = 2,
+    // On MIPS PC cannot actually be directly accessed. We behave as if PC was
+    // always the value of the current instruction being executed.
+    kPCReadOffset = 0
+  };
+
+  // Get the raw instruction bits.
+  inline Instr InstructionBits() const {
+    return *reinterpret_cast<const Instr*>(this);
+  }
+
+  // Set the raw instruction bits to value.
+  inline void SetInstructionBits(Instr value) {
+    *reinterpret_cast<Instr*>(this) = value;
+  }
+
+  // Read one particular bit out of the instruction bits.
+  inline int Bit(int nr) const {
+    return (InstructionBits() >> nr) & 1;
+  }
+
+  // Read a bit field out of the instruction bits.
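+  // For example, with the standard MIPS layout assumed throughout this file
+  // (rs in bits 25..21), Bits(25, 21) returns the 5-bit rs field value.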
+  inline int Bits(int hi, int lo) const {
+    return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+  }
+
+  // Instruction type.
+  enum Type {
+    kRegisterType,
+    kImmediateType,
+    kJumpType,
+    kUnsupported = -1
+  };
+
+  // Get the encoding type of the instruction.
+  Type InstructionType() const;
+
+
+  // Accessors for the different named fields used in the MIPS encoding.
+  inline Opcode OpcodeField() const {
+    return static_cast<Opcode>(
+        Bits(kOpcodeShift + kOpcodeBits - 1, kOpcodeShift));
+  }
+
+  inline int RsField() const {
+    ASSERT(InstructionType() == kRegisterType ||
+           InstructionType() == kImmediateType);
+    return Bits(kRsShift + kRsBits - 1, kRsShift);
+  }
+
+  inline int RtField() const {
+    ASSERT(InstructionType() == kRegisterType ||
+           InstructionType() == kImmediateType);
+    return Bits(kRtShift + kRtBits - 1, kRtShift);
+  }
+
+  inline int RdField() const {
+    ASSERT(InstructionType() == kRegisterType);
+    return Bits(kRdShift + kRdBits - 1, kRdShift);
+  }
+
+  inline int SaField() const {
+    ASSERT(InstructionType() == kRegisterType);
+    return Bits(kSaShift + kSaBits - 1, kSaShift);
+  }
+
+  inline int FunctionField() const {
+    ASSERT(InstructionType() == kRegisterType ||
+           InstructionType() == kImmediateType);
+    return Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift);
+  }
+
+  inline int FsField() const {
+    return Bits(kFsShift + kRsBits - 1, kFsShift);
+  }
+
+  inline int FtField() const {
+    return Bits(kFtShift + kRsBits - 1, kFtShift);
+  }
+
+  // Return the fields at their original place in the instruction encoding.
+  inline Opcode OpcodeFieldRaw() const {
+    return static_cast<Opcode>(InstructionBits() & kOpcodeMask);
+  }
+
+  inline int RsFieldRaw() const {
+    ASSERT(InstructionType() == kRegisterType ||
+           InstructionType() == kImmediateType);
+    return InstructionBits() & kRsFieldMask;
+  }
+
+  inline int RtFieldRaw() const {
+    ASSERT(InstructionType() == kRegisterType ||
+           InstructionType() == kImmediateType);
+    return InstructionBits() & kRtFieldMask;
+  }
+
+  inline int RdFieldRaw() const {
+    ASSERT(InstructionType() == kRegisterType);
+    return InstructionBits() & kRdFieldMask;
+  }
+
+  inline int SaFieldRaw() const {
+    ASSERT(InstructionType() == kRegisterType);
+    return InstructionBits() & kSaFieldMask;
+  }
+
+  inline int FunctionFieldRaw() const {
+    return InstructionBits() & kFunctionFieldMask;
+  }
+
+  // Get the secondary field according to the opcode.
+  inline int SecondaryField() const {
+    Opcode op = OpcodeFieldRaw();
+    switch (op) {
+      case SPECIAL:
+      case SPECIAL2:
+        return FunctionField();
+      case COP1:
+        return RsField();
+      case REGIMM:
+        return RtField();
+      default:
+        return NULLSF;
+    }
+  }
+
+  inline int32_t Imm16Field() const {
+    ASSERT(InstructionType() == kImmediateType);
+    return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
+  }
+
+  inline int32_t Imm26Field() const {
+    ASSERT(InstructionType() == kJumpType);
+    return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
+  }
+
+  // Say if the instruction should not be used in a branch delay slot.
+  bool IsForbiddenInBranchDelay();
+  // Say if the instruction 'links'. eg: jal, bal.
+  bool IsLinkingInstruction();
+  // Say if the instruction is a break or a trap.
+  bool IsTrap();
+
+  // Instructions are read out of a code stream. The only way to get a
+  // reference to an instruction is to convert a pointer. There is no way
+  // to allocate or create instances of class Instruction.
+  // Use the At(pc) function to create references to Instruction.
+  static Instruction* At(byte_* pc) {
+    return reinterpret_cast<Instruction*>(pc);
+  }
+
+ private:
+  // We need to prevent the creation of instances of class Instruction.
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
+};
+
+
+// -----------------------------------------------------------------------------
+// MIPS assembly various constants.
+
+static const int kArgsSlotsSize  = 4 * Instruction::kInstructionSize;
+static const int kArgsSlotsNum   = 4;
+
+static const int kBranchReturnOffset = 2 * Instruction::kInstructionSize;
+
+static const int kDoubleAlignment = 2 * 8;
+static const int kDoubleAlignmentMask = kDoubleAlignment - 1;
+
+
+} }   // namespace assembler::mips
+
+#endif    // #ifndef V8_MIPS_CONSTANTS_H_
+
diff --git a/src/mips/cpu-mips.cc b/src/mips/cpu-mips.cc
new file mode 100644
index 0000000..f592257
--- /dev/null
+++ b/src/mips/cpu-mips.cc
@@ -0,0 +1,69 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// CPU specific code for MIPS independent of OS goes here.
+
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#ifdef __mips
+#include <asm/cachectl.h>
+#endif  // #ifdef __mips
+
+#include "v8.h"
+#include "cpu.h"
+
+namespace v8 {
+namespace internal {
+
+void CPU::Setup() {
+  // Nothing to do.
+}
+
+void CPU::FlushICache(void* start, size_t size) {
+#ifdef __mips
+  int res;
+
+  // See http://www.linux-mips.org/wiki/Cacheflush_Syscall
+  res = syscall(__NR_cacheflush, start, size, ICACHE);
+
+  if (res) {
+    V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
+  }
+
+#endif    // #ifdef __mips
+}
+
+
+void CPU::DebugBreak() {
+#ifdef __mips
+  asm volatile("break");
+#endif  // #ifdef __mips
+}
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/debug-mips.cc b/src/mips/debug-mips.cc
new file mode 100644
index 0000000..772bcc0
--- /dev/null
+++ b/src/mips/debug-mips.cc
@@ -0,0 +1,112 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "debug.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+bool BreakLocationIterator::IsDebugBreakAtReturn() {
+  return Debug::IsDebugBreakAtReturn(rinfo());
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtReturn() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// Restore the JS frame exit code.
+void BreakLocationIterator::ClearDebugBreakAtReturn() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// A debug break in the exit code is identified by a call.
+bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
+  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+  return rinfo->IsPatchedReturnSequence();
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/disasm-mips.cc b/src/mips/disasm-mips.cc
new file mode 100644
index 0000000..cab72d1
--- /dev/null
+++ b/src/mips/disasm-mips.cc
@@ -0,0 +1,784 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// A Disassembler object is used to disassemble a block of code instruction by
+// instruction. The default implementation of the NameConverter object can be
+// overridden to modify register names or to do symbol lookup on addresses.
+//
+// The example below will disassemble a block of code and print it to stdout.
+//
+//   NameConverter converter;
+//   Disassembler d(converter);
+//   for (byte_* pc = begin; pc < end;) {
+//     char buffer[128];
+//     buffer[0] = '\0';
+//     byte_* prev_pc = pc;
+//     pc += d.InstructionDecode(buffer, sizeof buffer, pc);
+//     printf("%p    %08x      %s\n",
+//            prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
+//   }
+//
+// The Disassembler class also has a convenience method to disassemble a block
+// of code into a FILE*, meaning that the above functionality could also be
+// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
+
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#ifndef WIN32
+#include <stdint.h>
+#endif
+
+#include "v8.h"
+
+#include "constants-mips.h"
+#include "disasm.h"
+#include "macro-assembler.h"
+#include "platform.h"
+
+namespace assembler {
+namespace mips {
+
+
+namespace v8i = v8::internal;
+
+
+//------------------------------------------------------------------------------
+
+// Decoder decodes and disassembles instructions into an output buffer.
+// It uses the converter to convert register names and call destinations into
+// more informative description.
+class Decoder {
+ public:
+  Decoder(const disasm::NameConverter& converter,
+          v8::internal::Vector<char> out_buffer)
+    : converter_(converter),
+      out_buffer_(out_buffer),
+      out_buffer_pos_(0) {
+    out_buffer_[out_buffer_pos_] = '\0';
+  }
+
+  ~Decoder() {}
+
+  // Writes one disassembled instruction into 'buffer' (0-terminated).
+  // Returns the length of the disassembled machine instruction in bytes.
+  int InstructionDecode(byte_* instruction);
+
+ private:
+  // Bottleneck functions to print into the out_buffer.
+  void PrintChar(const char ch);
+  void Print(const char* str);
+
+  // Printing of common values.
+  void PrintRegister(int reg);
+  void PrintCRegister(int creg);
+  void PrintRs(Instruction* instr);
+  void PrintRt(Instruction* instr);
+  void PrintRd(Instruction* instr);
+  void PrintFs(Instruction* instr);
+  void PrintFt(Instruction* instr);
+  void PrintFd(Instruction* instr);
+  void PrintSa(Instruction* instr);
+  void PrintFunction(Instruction* instr);
+  void PrintSecondaryField(Instruction* instr);
+  void PrintUImm16(Instruction* instr);
+  void PrintSImm16(Instruction* instr);
+  void PrintXImm16(Instruction* instr);
+  void PrintImm26(Instruction* instr);
+  void PrintCode(Instruction* instr);   // For break and trap instructions.
+  // Printing of instruction name.
+  void PrintInstructionName(Instruction* instr);
+
+  // Handle formatting of instructions and their options.
+  int FormatRegister(Instruction* instr, const char* option);
+  int FormatCRegister(Instruction* instr, const char* option);
+  int FormatOption(Instruction* instr, const char* option);
+  void Format(Instruction* instr, const char* format);
+  void Unknown(Instruction* instr);
+
+  // Each of these functions decodes one particular instruction type.
+  void DecodeTypeRegister(Instruction* instr);
+  void DecodeTypeImmediate(Instruction* instr);
+  void DecodeTypeJump(Instruction* instr);
+
+  const disasm::NameConverter& converter_;
+  v8::internal::Vector<char> out_buffer_;
+  int out_buffer_pos_;
+
+  DISALLOW_COPY_AND_ASSIGN(Decoder);
+};
+
+
+// Support for assertions in the Decoder formatting functions.
+#define STRING_STARTS_WITH(string, compare_string) \
+  (strncmp(string, compare_string, strlen(compare_string)) == 0)
+
+
+// Append the ch to the output buffer.
+void Decoder::PrintChar(const char ch) {
+  out_buffer_[out_buffer_pos_++] = ch;
+}
+
+
+// Append the str to the output buffer.
+void Decoder::Print(const char* str) {
+  char cur = *str++;
+  while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+    PrintChar(cur);
+    cur = *str++;
+  }
+  out_buffer_[out_buffer_pos_] = 0;
+}
+
+
+// Print the register name according to the active name converter.
+void Decoder::PrintRegister(int reg) {
+  Print(converter_.NameOfCPURegister(reg));
+}
+
+
+void Decoder::PrintRs(Instruction* instr) {
+  int reg = instr->RsField();
+  PrintRegister(reg);
+}
+
+
+void Decoder::PrintRt(Instruction* instr) {
+  int reg = instr->RtField();
+  PrintRegister(reg);
+}
+
+
+void Decoder::PrintRd(Instruction* instr) {
+  int reg = instr->RdField();
+  PrintRegister(reg);
+}
+
+
+// Print the Cregister name according to the active name converter.
+void Decoder::PrintCRegister(int creg) {
+  Print(converter_.NameOfXMMRegister(creg));
+}
+
+
+void Decoder::PrintFs(Instruction* instr) {
+  int creg = instr->RsField();
+  PrintCRegister(creg);
+}
+
+
+void Decoder::PrintFt(Instruction* instr) {
+  int creg = instr->RtField();
+  PrintCRegister(creg);
+}
+
+
+void Decoder::PrintFd(Instruction* instr) {
+  int creg = instr->RdField();
+  PrintCRegister(creg);
+}
+
+
+// Print the integer value of the sa field.
+void Decoder::PrintSa(Instruction* instr) {
+  int sa = instr->SaField();
+  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                       "%d", sa);
+}
+
+
+// Print 16-bit unsigned immediate value.
+void Decoder::PrintUImm16(Instruction* instr) {
+  int32_t imm = instr->Imm16Field();
+  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                       "%u", imm);
+}
+
+
+// Print 16-bit signed immediate value.
+void Decoder::PrintSImm16(Instruction* instr) {
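+  // Sign-extend the 16-bit immediate by shifting it into the upper half of
+  // the word and shifting it back arithmetically.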
+  int32_t imm = ((instr->Imm16Field())<<16)>>16;
+  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                       "%d", imm);
+}
+
+
+// Print 16-bit hexadecimal immediate value.
+void Decoder::PrintXImm16(Instruction* instr) {
+  int32_t imm = instr->Imm16Field();
+  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                       "0x%x", imm);
+}
+
+
+// Print 26-bit immediate value.
+void Decoder::PrintImm26(Instruction* instr) {
+  int32_t imm = instr->Imm26Field();
+  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                       "%d", imm);
+}
+
+
+// Print the code field of a break or trap instruction.
+void Decoder::PrintCode(Instruction* instr) {
+  if (instr->OpcodeFieldRaw() != SPECIAL)
+    return;  // Not a break or trap instruction.
+  switch (instr->FunctionFieldRaw()) {
+    case BREAK: {
+      int32_t code = instr->Bits(25, 6);
+      out_buffer_pos_ +=
+          v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%05x", code);
+      break;
+                }
+    case TGE:
+    case TGEU:
+    case TLT:
+    case TLTU:
+    case TEQ:
+    case TNE: {
+      int32_t code = instr->Bits(15, 6);
+      out_buffer_pos_ +=
+          v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code);
+      break;
+    }
+    default:  // Not a break or trap instruction.
+      break;
+  };
+}
+
+
+// Printing of instruction name.
+void Decoder::PrintInstructionName(Instruction* instr) {
+}
+
+
+// Handle all register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatRegister(Instruction* instr, const char* format) {
+  ASSERT(format[0] == 'r');
+  if (format[1] == 's') {  // 'rs: Rs register
+    int reg = instr->RsField();
+    PrintRegister(reg);
+    return 2;
+  } else if (format[1] == 't') {  // 'rt: rt register
+    int reg = instr->RtField();
+    PrintRegister(reg);
+    return 2;
+  } else if (format[1] == 'd') {  // 'rd: rd register
+    int reg = instr->RdField();
+    PrintRegister(reg);
+    return 2;
+  }
+  UNREACHABLE();
+  return -1;
+}
+
+
+// Handle all Cregister based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatCRegister(Instruction* instr, const char* format) {
+  ASSERT(format[0] == 'f');
+  if (format[1] == 's') {  // 'fs: fs register
+    int reg = instr->RsField();
+    PrintCRegister(reg);
+    return 2;
+  } else if (format[1] == 't') {  // 'ft: ft register
+    int reg = instr->RtField();
+    PrintCRegister(reg);
+    return 2;
+  } else if (format[1] == 'd') {  // 'fd: fd register
+    int reg = instr->RdField();
+    PrintCRegister(reg);
+    return 2;
+  }
+  UNREACHABLE();
+  return -1;
+}
+
+
+// FormatOption takes a formatting string and interprets it based on
+// the current instructions. The format string points to the first
+// character of the option string (the option escape has already been
+// consumed by the caller.)  FormatOption returns the number of
+// characters that were consumed from the formatting string.
+int Decoder::FormatOption(Instruction* instr, const char* format) {
+  switch (format[0]) {
+    case 'c': {   // 'code for break or trap instructions
+      ASSERT(STRING_STARTS_WITH(format, "code"));
+      PrintCode(instr);
+      return 4;
+    }
+    case 'i': {   // 'imm16u or 'imm26
+      if (format[3] == '1') {
+        ASSERT(STRING_STARTS_WITH(format, "imm16"));
+        if (format[5] == 's') {
+          ASSERT(STRING_STARTS_WITH(format, "imm16s"));
+          PrintSImm16(instr);
+        } else if (format[5] == 'u') {
+          ASSERT(STRING_STARTS_WITH(format, "imm16u"));
+          PrintUImm16(instr);
+        } else {
+          ASSERT(STRING_STARTS_WITH(format, "imm16x"));
+          PrintXImm16(instr);
+        }
+        return 6;
+      } else {
+        ASSERT(STRING_STARTS_WITH(format, "imm26"));
+        PrintImm26(instr);
+        return 5;
+      }
+    }
+    case 'r': {   // 'r: registers
+      return FormatRegister(instr, format);
+    }
+    case 'f': {   // 'f: Cregisters
+      return FormatCRegister(instr, format);
+    }
+    case 's': {   // 'sa
+      ASSERT(STRING_STARTS_WITH(format, "sa"));
+      PrintSa(instr);
+      return 2;
+    }
+  };
+  UNREACHABLE();
+  return -1;
+}
+
+
+// Format takes a formatting string for a whole instruction and prints it into
+// the output buffer. All escaped options are handed to FormatOption to be
+// parsed further.
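+// For example, a format string such as "addiu  'rt, 'rs, 'imm16s" (shown here
+// only as an illustration) is copied to the output verbatim, except that each
+// quoted option expands to the decoded register name or immediate value.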
+void Decoder::Format(Instruction* instr, const char* format) {
+  char cur = *format++;
+  while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+    if (cur == '\'') {  // Single quote is used as the formatting escape.
+      format += FormatOption(instr, format);
+    } else {
+      out_buffer_[out_buffer_pos_++] = cur;
+    }
+    cur = *format++;
+  }
+  out_buffer_[out_buffer_pos_]  = '\0';
+}
+
+
+// For currently unimplemented decodings the disassembler calls Unknown(instr)
+// which will just print "unknown" instead of the instruction bits.
+void Decoder::Unknown(Instruction* instr) {
+  Format(instr, "unknown");
+}
+
+
+void Decoder::DecodeTypeRegister(Instruction* instr) {
+  switch (instr->OpcodeFieldRaw()) {
+    case COP1:    // Coprocessor instructions
+      switch (instr->RsFieldRaw()) {
+        case BC1:   // branch on coprocessor condition
+          UNREACHABLE();
+          break;
+        case MFC1:
+          Format(instr, "mfc1 'rt, 'fs");
+          break;
+        case MFHC1:
+          Format(instr, "mfhc1  rt, 'fs");
+          break;
+        case MTC1:
+          Format(instr, "mtc1 'rt, 'fs");
+          break;
+        case MTHC1:
+          Format(instr, "mthc1  rt, 'fs");
+          break;
+        case S:
+        case D:
+          UNIMPLEMENTED_MIPS();
+          break;
+        case W:
+          switch (instr->FunctionFieldRaw()) {
+            case CVT_S_W:
+              UNIMPLEMENTED_MIPS();
+              break;
+            case CVT_D_W:   // Convert word to double.
+              Format(instr, "cvt.d.w  'fd, 'fs");
+              break;
+            default:
+              UNREACHABLE();
+          };
+          break;
+        case L:
+        case PS:
+          UNIMPLEMENTED_MIPS();
+          break;
+        default:
+          UNREACHABLE();
+      };
+      break;
+    case SPECIAL:
+      switch (instr->FunctionFieldRaw()) {
+        case JR:
+          Format(instr, "jr   'rs");
+          break;
+        case JALR:
+          Format(instr, "jalr 'rs");
+          break;
+        case SLL:
+          if (0x0 == static_cast<int>(instr->InstructionBits()))
+            Format(instr, "nop");
+          else
+            Format(instr, "sll  'rd, 'rt, 'sa");
+          break;
+        case SRL:
+          Format(instr, "srl  'rd, 'rt, 'sa");
+          break;
+        case SRA:
+          Format(instr, "sra  'rd, 'rt, 'sa");
+          break;
+        case SLLV:
+          Format(instr, "sllv 'rd, 'rt, 'rs");
+          break;
+        case SRLV:
+          Format(instr, "srlv 'rd, 'rt, 'rs");
+          break;
+        case SRAV:
+          Format(instr, "srav 'rd, 'rt, 'rs");
+          break;
+        case MFHI:
+          Format(instr, "mfhi 'rd");
+          break;
+        case MFLO:
+          Format(instr, "mflo 'rd");
+          break;
+        case MULT:
+          Format(instr, "mult 'rs, 'rt");
+          break;
+        case MULTU:
+          Format(instr, "multu  'rs, 'rt");
+          break;
+        case DIV:
+          Format(instr, "div  'rs, 'rt");
+          break;
+        case DIVU:
+          Format(instr, "divu 'rs, 'rt");
+          break;
+        case ADD:
+          Format(instr, "add  'rd, 'rs, 'rt");
+          break;
+        case ADDU:
+          Format(instr, "addu 'rd, 'rs, 'rt");
+          break;
+        case SUB:
+          Format(instr, "sub  'rd, 'rs, 'rt");
+          break;
+        case SUBU:
+          Format(instr, "sub  'rd, 'rs, 'rt");
+          break;
+        case AND:
+          Format(instr, "and  'rd, 'rs, 'rt");
+          break;
+        case OR:
+          if (0 == instr->RsField()) {
+            Format(instr, "mov  'rd, 'rt");
+          } else if (0 == instr->RtField()) {
+            Format(instr, "mov  'rd, 'rs");
+          } else {
+            Format(instr, "or   'rd, 'rs, 'rt");
+          }
+          break;
+        case XOR:
+          Format(instr, "xor  'rd, 'rs, 'rt");
+          break;
+        case NOR:
+          Format(instr, "nor  'rd, 'rs, 'rt");
+          break;
+        case SLT:
+          Format(instr, "slt  'rd, 'rs, 'rt");
+          break;
+        case SLTU:
+          Format(instr, "sltu 'rd, 'rs, 'rt");
+          break;
+        case BREAK:
+          Format(instr, "break, code: 'code");
+          break;
+        case TGE:
+          Format(instr, "tge  'rs, 'rt, code: 'code");
+          break;
+        case TGEU:
+          Format(instr, "tgeu 'rs, 'rt, code: 'code");
+          break;
+        case TLT:
+          Format(instr, "tlt  'rs, 'rt, code: 'code");
+          break;
+        case TLTU:
+          Format(instr, "tltu 'rs, 'rt, code: 'code");
+          break;
+        case TEQ:
+          Format(instr, "teq  'rs, 'rt, code: 'code");
+          break;
+        case TNE:
+          Format(instr, "tne  'rs, 'rt, code: 'code");
+          break;
+        default:
+          UNREACHABLE();
+      };
+      break;
+    case SPECIAL2:
+      switch (instr->FunctionFieldRaw()) {
+        case MUL:
+          break;
+        default:
+          UNREACHABLE();
+      };
+      break;
+    default:
+      UNREACHABLE();
+  };
+}
+
+
+void Decoder::DecodeTypeImmediate(Instruction* instr) {
+  switch (instr->OpcodeFieldRaw()) {
+    // ------------- REGIMM class.
+    case REGIMM:
+      switch (instr->RtFieldRaw()) {
+        case BLTZ:
+          Format(instr, "bltz 'rs, 'imm16u");
+          break;
+        case BLTZAL:
+          Format(instr, "bltzal 'rs, 'imm16u");
+          break;
+        case BGEZ:
+          Format(instr, "bgez 'rs, 'imm16u");
+          break;
+        case BGEZAL:
+          Format(instr, "bgezal 'rs, 'imm16u");
+          break;
+        default:
+          UNREACHABLE();
+      };
+      break;  // case REGIMM
+    // ------------- Branch instructions.
+    case BEQ:
+      Format(instr, "beq  'rs, 'rt, 'imm16u");
+      break;
+    case BNE:
+      Format(instr, "bne  'rs, 'rt, 'imm16u");
+      break;
+    case BLEZ:
+      Format(instr, "blez 'rs, 'imm16u");
+      break;
+    case BGTZ:
+      Format(instr, "bgtz 'rs, 'imm16u");
+      break;
+    // ------------- Arithmetic instructions.
+    case ADDI:
+      Format(instr, "addi   'rt, 'rs, 'imm16s");
+      break;
+    case ADDIU:
+      Format(instr, "addiu  'rt, 'rs, 'imm16s");
+      break;
+    case SLTI:
+      Format(instr, "slti   'rt, 'rs, 'imm16s");
+      break;
+    case SLTIU:
+      Format(instr, "sltiu  'rt, 'rs, 'imm16u");
+      break;
+    case ANDI:
+      Format(instr, "andi   'rt, 'rs, 'imm16x");
+      break;
+    case ORI:
+      Format(instr, "ori    'rt, 'rs, 'imm16x");
+      break;
+    case XORI:
+      Format(instr, "xori   'rt, 'rs, 'imm16x");
+      break;
+    case LUI:
+      Format(instr, "lui    'rt, 'imm16x");
+      break;
+    // ------------- Memory instructions.
+    case LB:
+      Format(instr, "lb     'rt, 'imm16s('rs)");
+      break;
+    case LW:
+      Format(instr, "lw     'rt, 'imm16s('rs)");
+      break;
+    case LBU:
+      Format(instr, "lbu    'rt, 'imm16s('rs)");
+      break;
+    case SB:
+      Format(instr, "sb     'rt, 'imm16s('rs)");
+      break;
+    case SW:
+      Format(instr, "sw     'rt, 'imm16s('rs)");
+      break;
+    case LWC1:
+      Format(instr, "lwc1   'ft, 'imm16s('rs)");
+      break;
+    case LDC1:
+      Format(instr, "ldc1   'ft, 'imm16s('rs)");
+      break;
+    case SWC1:
+      Format(instr, "swc1   'rt, 'imm16s('fs)");
+      break;
+    case SDC1:
+      Format(instr, "sdc1   'rt, 'imm16s('fs)");
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  };
+}
+
+
+void Decoder::DecodeTypeJump(Instruction* instr) {
+  switch (instr->OpcodeFieldRaw()) {
+    case J:
+      Format(instr, "j    'imm26");
+      break;
+    case JAL:
+      Format(instr, "jal  'imm26");
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+// Disassemble the instruction at *instr_ptr into the output buffer.
+int Decoder::InstructionDecode(byte_* instr_ptr) {
+  Instruction* instr = Instruction::At(instr_ptr);
+  // Print raw instruction bytes.
+  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                       "%08x       ",
+                                       instr->InstructionBits());
+  switch (instr->InstructionType()) {
+    case Instruction::kRegisterType: {
+      DecodeTypeRegister(instr);
+      break;
+    }
+    case Instruction::kImmediateType: {
+      DecodeTypeImmediate(instr);
+      break;
+    }
+    case Instruction::kJumpType: {
+      DecodeTypeJump(instr);
+      break;
+    }
+    default: {
+      UNSUPPORTED_MIPS();
+    }
+  }
+  return Instruction::kInstructionSize;
+}
+
+
+} }  // namespace assembler::mips
+
+
+
+//------------------------------------------------------------------------------
+
+namespace disasm {
+
+namespace v8i = v8::internal;
+
+
+const char* NameConverter::NameOfAddress(byte_* addr) const {
+  static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
+  v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
+  return tmp_buffer.start();
+}
+
+
+const char* NameConverter::NameOfConstant(byte_* addr) const {
+  return NameOfAddress(addr);
+}
+
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+  return assembler::mips::Registers::Name(reg);
+}
+
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+  return assembler::mips::FPURegister::Name(reg);
+}
+
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+  UNREACHABLE();  // MIPS does not have the concept of a byte register
+  return "nobytereg";
+}
+
+
+const char* NameConverter::NameInCode(byte_* addr) const {
+  // The default name converter is called for unknown code. So we will not try
+  // to access any memory.
+  return "";
+}
+
+
+//------------------------------------------------------------------------------
+
+Disassembler::Disassembler(const NameConverter& converter)
+    : converter_(converter) {}
+
+
+Disassembler::~Disassembler() {}
+
+
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+                                    byte_* instruction) {
+  assembler::mips::Decoder d(converter_, buffer);
+  return d.InstructionDecode(instruction);
+}
+
+
+int Disassembler::ConstantPoolSizeAt(byte_* instruction) {
+  UNIMPLEMENTED_MIPS();
+  return -1;
+}
+
+
+void Disassembler::Disassemble(FILE* f, byte_* begin, byte_* end) {
+  NameConverter converter;
+  Disassembler d(converter);
+  for (byte_* pc = begin; pc < end;) {
+    v8::internal::EmbeddedVector<char, 128> buffer;
+    buffer[0] = '\0';
+    byte_* prev_pc = pc;
+    pc += d.InstructionDecode(buffer, pc);
+    fprintf(f, "%p    %08x      %s\n",
+            prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+  }
+}
+
+#undef UNSUPPORTED
+
+}  // namespace disasm
+
diff --git a/src/mips/fast-codegen-mips.cc b/src/mips/fast-codegen-mips.cc
new file mode 100644
index 0000000..c47f632
--- /dev/null
+++ b/src/mips/fast-codegen-mips.cc
@@ -0,0 +1,56 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "fast-codegen.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+void FastCodeGenerator::Generate(CompilationInfo* info) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
+  UNIMPLEMENTED_MIPS();
+}
+
+#undef __
+
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/frames-mips.cc b/src/mips/frames-mips.cc
new file mode 100644
index 0000000..d2c717c
--- /dev/null
+++ b/src/mips/frames-mips.cc
@@ -0,0 +1,100 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "v8.h"
+
+#include "frames-inl.h"
+#include "mips/assembler-mips-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+StackFrame::Type StackFrame::ComputeType(State* state) {
+  ASSERT(state->fp != NULL);
+  if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
+    return ARGUMENTS_ADAPTOR;
+  }
+  // The marker and function offsets overlap. If the marker isn't a
+  // smi then the frame is a JavaScript frame -- and the marker is
+  // really the function.
+  const int offset = StandardFrameConstants::kMarkerOffset;
+  Object* marker = Memory::Object_at(state->fp + offset);
+  if (!marker->IsSmi()) return JAVA_SCRIPT;
+  return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
+}
+
+
+StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
+  if (fp == 0) return NONE;
+  // Compute frame type and stack pointer.
+  Address sp = fp + ExitFrameConstants::kSPDisplacement;
+  const int offset = ExitFrameConstants::kCodeOffset;
+  Object* code = Memory::Object_at(fp + offset);
+  bool is_debug_exit = code->IsSmi();
+  if (is_debug_exit) {
+    sp -= kNumJSCallerSaved * kPointerSize;
+  }
+  // Fill in the state.
+  state->sp = sp;
+  state->fp = fp;
+  state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
+  return EXIT;
+}
+
+
+void ExitFrame::Iterate(ObjectVisitor* v) const {
+  // Do nothing
+}
+
+
+int JavaScriptFrame::GetProvidedParametersCount() const {
+  return ComputeParametersCount();
+}
+
+
+Address JavaScriptFrame::GetCallerStackPointer() const {
+  UNIMPLEMENTED_MIPS();
+  return static_cast<Address>(NULL);  // UNIMPLEMENTED RETURN
+}
+
+
+Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
+  UNIMPLEMENTED_MIPS();
+  return static_cast<Address>(NULL);  // UNIMPLEMENTED RETURN
+}
+
+
+Address InternalFrame::GetCallerStackPointer() const {
+  UNIMPLEMENTED_MIPS();
+  return static_cast<Address>(NULL);  // UNIMPLEMENTED RETURN
+}
+
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/frames-mips.h b/src/mips/frames-mips.h
new file mode 100644
index 0000000..ec1949d
--- /dev/null
+++ b/src/mips/frames-mips.h
@@ -0,0 +1,164 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+#ifndef V8_MIPS_FRAMES_MIPS_H_
+#define V8_MIPS_FRAMES_MIPS_H_
+
+
+namespace v8 {
+namespace internal {
+
+// Register lists.
+// Note that the bit values must match those used in actual instruction
+// encoding.
+static const int kNumRegs = 32;
+
+static const RegList kJSCallerSaved =
+  1 << 4 |  // a0
+  1 << 5 |  // a1
+  1 << 6 |  // a2
+  1 << 7;   // a3
+
+static const int kNumJSCallerSaved = 4;
+
+
+// Return the code of the n-th caller-saved register available to JavaScript,
+// e.g. JSCallerSavedCode(0) returns a0.code() == 4.
+int JSCallerSavedCode(int n);
+
+
+// Callee-saved registers preserved when switching from C to JavaScript.
+static const RegList kCalleeSaved =
+  // Saved temporaries.
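+  // (Registers 16 through 23, i.e. s0..s7 in the usual MIPS naming.)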
+  1 << 16 | 1 << 17 | 1 << 18 | 1 << 19 |
+  1 << 20 | 1 << 21 | 1 << 22 | 1 << 23 |
+  // gp, sp, fp
+  1 << 28 | 1 << 29 | 1 << 30;
+
+static const int kNumCalleeSaved = 11;
+
+
+typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
+
+
+// ----------------------------------------------------
+
+class StackHandlerConstants : public AllStatic {
+ public:
+  static const int kNextOffset  = 0 * kPointerSize;
+  static const int kStateOffset = 1 * kPointerSize;
+  static const int kFPOffset    = 2 * kPointerSize;
+  static const int kPCOffset    = 3 * kPointerSize;
+
+  static const int kSize = kPCOffset + kPointerSize;
+};
+
+
+class EntryFrameConstants : public AllStatic {
+ public:
+  static const int kCallerFPOffset      = -3 * kPointerSize;
+};
+
+
+class ExitFrameConstants : public AllStatic {
+ public:
+  // Exit frames have a debug marker on the stack.
+  static const int kSPDisplacement = -1 * kPointerSize;
+
+  // The debug marker is just above the frame pointer.
+  static const int kDebugMarkOffset = -1 * kPointerSize;
+  // Must be the same as kDebugMarkOffset. Alias introduced when upgrading.
+  static const int kCodeOffset = -1 * kPointerSize;
+
+  static const int kSavedRegistersOffset = 0 * kPointerSize;
+
+  // The caller fields are below the frame pointer on the stack.
+  static const int kCallerFPOffset = +0 * kPointerSize;
+  // The calling JS function is between FP and PC.
+  static const int kCallerPCOffset = +1 * kPointerSize;
+
+  // FP-relative displacement of the caller's SP.
+  static const int kCallerSPDisplacement = +4 * kPointerSize;
+};
+
+
+class StandardFrameConstants : public AllStatic {
+ public:
+  static const int kExpressionsOffset = -3 * kPointerSize;
+  static const int kMarkerOffset      = -2 * kPointerSize;
+  static const int kContextOffset     = -1 * kPointerSize;
+  static const int kCallerFPOffset    =  0 * kPointerSize;
+  static const int kCallerPCOffset    = +1 * kPointerSize;
+  static const int kCallerSPOffset    = +2 * kPointerSize;
+
+  // Size of the four 32-bit argument slots reserved on MIPS.
+  // This is just an alias with a shorter name. Use it from now on.
+  static const int kRArgsSlotsSize = 4 * kPointerSize;
+  static const int kRegularArgsSlotsSize = kRArgsSlotsSize;
+
+  // C/C++ argument slots size.
+  static const int kCArgsSlotsSize = 4 * kPointerSize;
+  // JS argument slots size.
+  static const int kJSArgsSlotsSize = 0 * kPointerSize;
+};
+
+
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+  // FP-relative.
+  static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+  static const int kSavedRegistersOffset = +2 * kPointerSize;
+  static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+
+  // Caller SP-relative.
+  static const int kParam0Offset   = -2 * kPointerSize;
+  static const int kReceiverOffset = -1 * kPointerSize;
+};
+
+
+class ArgumentsAdaptorFrameConstants : public AllStatic {
+ public:
+  static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+class InternalFrameConstants : public AllStatic {
+ public:
+  static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+inline Object* JavaScriptFrame::function_slot_object() const {
+  const int offset = JavaScriptFrameConstants::kFunctionOffset;
+  return Memory::Object_at(fp() + offset);
+}
+
+} }  // namespace v8::internal
+
+#endif    // #ifndef V8_MIPS_FRAMES_MIPS_H_
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
new file mode 100644
index 0000000..920329e
--- /dev/null
+++ b/src/mips/full-codegen-mips.cc
@@ -0,0 +1,268 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "compiler.h"
+#include "debug.h"
+#include "full-codegen.h"
+#include "parser.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitReturnSequence(int position) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
+  UNIMPLEMENTED_MIPS();
+}
+
+void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::ApplyTOS(Expression::Context context) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::DropAndApply(int count,
+                                     Expression::Context context,
+                                     Register reg) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::Apply(Expression::Context context,
+                              Label* materialize_true,
+                              Label* materialize_false) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::DoTest(Expression::Context context) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+  UNIMPLEMENTED_MIPS();
+  return MemOperand(zero_reg, 0);   // UNIMPLEMENTED RETURN
+}
+
+
+void FullCodeGenerator::Move(Register destination, Slot* source) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::Move(Slot* dst,
+                             Register src,
+                             Register scratch1,
+                             Register scratch2) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(Variable* var,
+                                         Expression::Context context) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+                                     Expression::Context context) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var,
+                                               Expression::Context context) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitProperty(Property* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+void FullCodeGenerator::EmitCallWithIC(Call* expr,
+                                       Handle<Object> ignored,
+                                       RelocInfo::Mode mode) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitCall(Call* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+Register FullCodeGenerator::result_register() { return v0; }
+
+
+Register FullCodeGenerator::context_register() { return cp; }
+
+
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// ----------------------------------------------------------------------------
+// Non-local control flow support.
+
+void FullCodeGenerator::EnterFinallyBlock() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::ExitFinallyBlock() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
new file mode 100644
index 0000000..5598cdf
--- /dev/null
+++ b/src/mips/ic-mips.cc
@@ -0,0 +1,187 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "ic-inl.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
+
+void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void LoadIC::GenerateStringLength(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// Defined in ic.cc.
+Object* CallIC_Miss(Arguments args);
+
+void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+  UNIMPLEMENTED_MIPS();
+}
+
+void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+  UNIMPLEMENTED_MIPS();
+}
+
+// Defined in ic.cc.
+Object* LoadIC_Miss(Arguments args);
+
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void LoadIC::ClearInlinedVersion(Address address) {}
+bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+  return false;
+}
+
+void KeyedLoadIC::ClearInlinedVersion(Address address) {}
+bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+  return false;
+}
+
+void KeyedStoreIC::ClearInlinedVersion(Address address) {}
+void KeyedStoreIC::RestoreInlinedVersion(Address address) {}
+bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
+  return false;
+}
+
+
+Object* KeyedLoadIC_Miss(Arguments args);
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+  Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
+}
+
+
+void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
+                                        ExternalArrayType array_type) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedStoreIC::Generate(MacroAssembler* masm,
+                            const ExternalReference& f) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
+                                         ExternalArrayType array_type) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+#undef __
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/jump-target-mips.cc b/src/mips/jump-target-mips.cc
new file mode 100644
index 0000000..3301d19
--- /dev/null
+++ b/src/mips/jump-target-mips.cc
@@ -0,0 +1,87 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "jump-target-inl.h"
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// JumpTarget implementation.
+
+#define __ ACCESS_MASM(cgen()->masm())
+
+void JumpTarget::DoJump() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void JumpTarget::DoBranch(Condition cc, Hint ignored) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void JumpTarget::Call() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void JumpTarget::DoBind() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void BreakTarget::Jump() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void BreakTarget::Jump(Result* arg) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void BreakTarget::Bind() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void BreakTarget::Bind(Result* arg) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
new file mode 100644
index 0000000..b733bdd
--- /dev/null
+++ b/src/mips/macro-assembler-mips.cc
@@ -0,0 +1,895 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "runtime.h"
+
+namespace v8 {
+namespace internal {
+
+MacroAssembler::MacroAssembler(void* buffer, int size)
+    : Assembler(buffer, size),
+      unresolved_(0),
+      generating_stub_(false),
+      allow_stub_calls_(true),
+      code_object_(Heap::undefined_value()) {
+}
+
+
+
+void MacroAssembler::Jump(Register target, Condition cond,
+                          Register r1, const Operand& r2) {
+  Jump(Operand(target), cond, r1, r2);
+}
+
+
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+                          Condition cond, Register r1, const Operand& r2) {
+  Jump(Operand(target), cond, r1, r2);
+}
+
+
+void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
+                          Condition cond, Register r1, const Operand& r2) {
+  ASSERT(!RelocInfo::IsCodeTarget(rmode));
+  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
+}
+
+
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+                          Condition cond, Register r1, const Operand& r2) {
+  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
+}
+
+
+void MacroAssembler::Call(Register target,
+                          Condition cond, Register r1, const Operand& r2) {
+  Call(Operand(target), cond, r1, r2);
+}
+
+
+void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
+                          Condition cond, Register r1, const Operand& r2) {
+  Call(Operand(target), cond, r1, r2);
+}
+
+
+void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
+                          Condition cond, Register r1, const Operand& r2) {
+  ASSERT(!RelocInfo::IsCodeTarget(rmode));
+  Call(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
+}
+
+
+void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+                          Condition cond, Register r1, const Operand& r2) {
+  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
+}
+
+
+void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2) {
+  Jump(Operand(ra), cond, r1, r2);
+}
+
+
+void MacroAssembler::LoadRoot(Register destination,
+                              Heap::RootListIndex index) {
+  lw(destination, MemOperand(s4, index << kPointerSizeLog2));
+}
+
+void MacroAssembler::LoadRoot(Register destination,
+                              Heap::RootListIndex index,
+                              Condition cond,
+                              Register src1, const Operand& src2) {
+  Branch(NegateCondition(cond), 2, src1, src2);
+  nop();
+  lw(destination, MemOperand(s4, index << kPointerSizeLog2));
+}
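+
+
+// Root-list loads: this port appears to reserve s4 as a pointer to the heap
+// root array, so LoadRoot is a single lw from s4 plus the scaled root index.
+// A hypothetical use (assuming the usual root index constants):
+//   __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+// loads the undefined value into t0 in one instruction; the conditional
+// variant above first branches over the load when the condition fails.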
+
+
+void MacroAssembler::RecordWrite(Register object, Register offset,
+                                 Register scratch) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// ---------------------------------------------------------------------------
+// Instruction macros
+
+void MacroAssembler::Add(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    add(rd, rs, rt.rm());
+  } else {
+    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+      addi(rd, rs, rt.imm32_);
+    } else {
+      // li handles the relocation.
+      ASSERT(!rs.is(at));
+      li(at, rt);
+      add(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    addu(rd, rs, rt.rm());
+  } else {
+    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+      addiu(rd, rs, rt.imm32_);
+    } else {
+      // li handles the relocation.
+      ASSERT(!rs.is(at));
+      li(at, rt);
+      addu(rd, rs, at);
+    }
+  }
+}
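+
+
+// The pattern above is shared by the arithmetic/logical macros that follow:
+// when the right-hand operand is a register, emit the register form; when it
+// is an immediate that fits the signed 16-bit field and needs no relocation,
+// emit the immediate form (addiu here); otherwise materialize the value in
+// the reserved at register with li and use the register form. For example,
+// a rough expansion of Addu(v0, a0, Operand(0x12345)) is:
+//   lui   at, 0x1
+//   ori   at, at, 0x2345
+//   addu  v0, a0, at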
+
+
+void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    mul(rd, rs, rt.rm());
+  } else {
+    // li handles the relocation.
+    ASSERT(!rs.is(at));
+    li(at, rt);
+    mul(rd, rs, at);
+  }
+}
+
+
+void MacroAssembler::Mult(Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    mult(rs, rt.rm());
+  } else {
+    // li handles the relocation.
+    ASSERT(!rs.is(at));
+    li(at, rt);
+    mult(rs, at);
+  }
+}
+
+
+void MacroAssembler::Multu(Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    multu(rs, rt.rm());
+  } else {
+    // li handles the relocation.
+    ASSERT(!rs.is(at));
+    li(at, rt);
+    multu(rs, at);
+  }
+}
+
+
+void MacroAssembler::Div(Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    div(rs, rt.rm());
+  } else {
+    // li handles the relocation.
+    ASSERT(!rs.is(at));
+    li(at, rt);
+    div(rs, at);
+  }
+}
+
+
+void MacroAssembler::Divu(Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    divu(rs, rt.rm());
+  } else {
+    // li handles the relocation.
+    ASSERT(!rs.is(at));
+    li(at, rt);
+    divu(rs, at);
+  }
+}
+
+
+void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    and_(rd, rs, rt.rm());
+  } else {
+    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+      andi(rd, rs, rt.imm32_);
+    } else {
+      // li handles the relocation.
+      ASSERT(!rs.is(at));
+      li(at, rt);
+      and_(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    or_(rd, rs, rt.rm());
+  } else {
+    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+      ori(rd, rs, rt.imm32_);
+    } else {
+      // li handles the relocation.
+      ASSERT(!rs.is(at));
+      li(at, rt);
+      or_(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    xor_(rd, rs, rt.rm());
+  } else {
+    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+      xori(rd, rs, rt.imm32_);
+    } else {
+      // li handles the relocation.
+      ASSERT(!rs.is(at));
+      li(at, rt);
+      xor_(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    nor(rd, rs, rt.rm());
+  } else {
+    // li handles the relocation.
+    ASSERT(!rs.is(at));
+    li(at, rt);
+    nor(rd, rs, at);
+  }
+}
+
+
+void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    slt(rd, rs, rt.rm());
+  } else {
+    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+      slti(rd, rs, rt.imm32_);
+    } else {
+      // li handles the relocation.
+      ASSERT(!rs.is(at));
+      li(at, rt);
+      slt(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    sltu(rd, rs, rt.rm());
+  } else {
+    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+      sltiu(rd, rs, rt.imm32_);
+    } else {
+      // li handles the relocation.
+      ASSERT(!rs.is(at));
+      li(at, rt);
+      sltu(rd, rs, at);
+    }
+  }
+}
+
+
+//------------Pseudo-instructions-------------
+
+void MacroAssembler::movn(Register rd, Register rt) {
+  addiu(at, zero_reg, -1);  // Fill at with ones.
+  xor_(rd, rt, at);
+}
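+
+
+// Note: despite sharing a mnemonic with the MIPS conditional-move movn, the
+// helper above is a bitwise NOT: at is filled with all ones and then xor'ed
+// with rt into rd.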
+
+
+// Load a 32-bit immediate word into a register.
+void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
+  ASSERT(!j.is_reg());
+
+  if (!MustUseAt(j.rmode_) && !gen2instr) {
+    // Normal load of an immediate value which does not need Relocation Info.
+    if (is_int16(j.imm32_)) {
+      addiu(rd, zero_reg, j.imm32_);
+    } else if (!(j.imm32_ & HIMask)) {
+      ori(rd, zero_reg, j.imm32_);
+    } else if (!(j.imm32_ & LOMask)) {
+      lui(rd, (HIMask & j.imm32_) >> 16);
+    } else {
+      lui(rd, (HIMask & j.imm32_) >> 16);
+      ori(rd, rd, (LOMask & j.imm32_));
+    }
+  } else if (MustUseAt(j.rmode_) || gen2instr) {
+    if (MustUseAt(j.rmode_)) {
+      RecordRelocInfo(j.rmode_, j.imm32_);
+    }
+    // We always emit the same number of instructions, because this code may
+    // later be patched to load another value that needs two instructions.
+    if (is_int16(j.imm32_)) {
+      nop();
+      addiu(rd, zero_reg, j.imm32_);
+    } else if (!(j.imm32_ & HIMask)) {
+      nop();
+      ori(rd, zero_reg, j.imm32_);
+    } else if (!(j.imm32_ & LOMask)) {
+      nop();
+      lui(rd, (HIMask & j.imm32_) >> 16);
+    } else {
+      lui(rd, (HIMask & j.imm32_) >> 16);
+      ori(rd, rd, (LOMask & j.imm32_));
+    }
+  }
+}
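+
+
+// A concrete example of the fixed-size, patchable form above:
+//   li(t0, Operand(0x7), true)
+// emits
+//   nop
+//   addiu t0, zero_reg, 0x7
+// which occupies the same two slots as a lui/ori pair and can therefore be
+// patched later to load any 32-bit value.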
+
+
+// Exception-generating instructions and debugging support
+void MacroAssembler::stop(const char* msg) {
+  // TO_UPGRADE: Just a break for now. Maybe we could upgrade it.
+  // We use the 0x54321 value to be able to find it easily when reading memory.
+  break_(0x54321);
+}
+
+
+void MacroAssembler::MultiPush(RegList regs) {
+  int16_t NumSaved = 0;
+  int16_t NumToPush = NumberOfBitsSet(regs);
+
+  addiu(sp, sp, -4 * NumToPush);
+  for (int16_t i = 0; i < kNumRegisters; i++) {
+    if ((regs & (1 << i)) != 0) {
+      sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
+    }
+  }
+}
+
+
+void MacroAssembler::MultiPushReversed(RegList regs) {
+  int16_t NumSaved = 0;
+  int16_t NumToPush = NumberOfBitsSet(regs);
+
+  addiu(sp, sp, -4 * NumToPush);
+  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+    if ((regs & (1 << i)) != 0) {
+      sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
+    }
+  }
+}
+
+
+void MacroAssembler::MultiPop(RegList regs) {
+  int16_t NumSaved = 0;
+
+  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+    if ((regs & (1 << i)) != 0) {
+      lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
+    }
+  }
+  addiu(sp, sp, 4 * NumSaved);
+}
+
+
+void MacroAssembler::MultiPopReversed(RegList regs) {
+  int16_t NumSaved = 0;
+
+  for (int16_t i = 0; i < kNumRegisters; i++) {
+    if ((regs & (1 << i)) != 0) {
+      lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
+    }
+  }
+  addiu(sp, sp, 4 * NumSaved);
+}
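+
+
+// MultiPush/MultiPop (and their Reversed variants) are intended to be used as
+// matching pairs: the push walks the register list from the lowest code
+// upwards and stores towards decreasing offsets, so the lowest-numbered
+// register lands at the highest address, while the pop walks the list in the
+// opposite direction and reloads from increasing offsets. Pushing {t0, t1}
+// with MultiPush and later popping the same list with MultiPop therefore
+// restores both registers to their original values.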
+
+
+// Emulated conditional branches do not emit a nop in the branch delay slot.
+
+// Trashes the at register if no scratch register is provided.
+void MacroAssembler::Branch(Condition cond, int16_t offset, Register rs,
+                            const Operand& rt, Register scratch) {
+  Register r2;
+  if (rt.is_reg()) {
+    // We don't want any other register but scratch clobbered.
+    ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
+    r2 = rt.rm_;
+  } else if (cond != cc_always) {
+    // We don't want any other register but scratch clobbered.
+    ASSERT(!scratch.is(rs));
+    r2 = scratch;
+    li(r2, rt);
+  }
+
+  switch (cond) {
+    case cc_always:
+      b(offset);
+      break;
+    case eq:
+      beq(rs, r2, offset);
+      break;
+    case ne:
+      bne(rs, r2, offset);
+      break;
+
+      // Signed comparison
+    case greater:
+      slt(scratch, r2, rs);
+      bne(scratch, zero_reg, offset);
+      break;
+    case greater_equal:
+      slt(scratch, rs, r2);
+      beq(scratch, zero_reg, offset);
+      break;
+    case less:
+      slt(scratch, rs, r2);
+      bne(scratch, zero_reg, offset);
+      break;
+    case less_equal:
+      slt(scratch, r2, rs);
+      beq(scratch, zero_reg, offset);
+      break;
+
+      // Unsigned comparison.
+    case Ugreater:
+      sltu(scratch, r2, rs);
+      bne(scratch, zero_reg, offset);
+      break;
+    case Ugreater_equal:
+      sltu(scratch, rs, r2);
+      beq(scratch, zero_reg, offset);
+      break;
+    case Uless:
+      sltu(scratch, rs, r2);
+      bne(scratch, zero_reg, offset);
+      break;
+    case Uless_equal:
+      sltu(scratch, r2, rs);
+      beq(scratch, zero_reg, offset);
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+}
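+
+
+// MIPS has no condition flags, so the composite conditions above are
+// synthesized from a set-on-less-than into the scratch register followed by
+// a branch on that register. For example, Branch(greater, offset, a0,
+// Operand(a1)) expands roughly to:
+//   slt  at, a1, a0            // at = (a0 > a1) ? 1 : 0
+//   bne  at, zero_reg, offset
+// As noted above, the caller is responsible for filling the branch delay
+// slot (usually with a nop).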
+
+
+void MacroAssembler::Branch(Condition cond,  Label* L, Register rs,
+                            const Operand& rt, Register scratch) {
+  Register r2;
+  if (rt.is_reg()) {
+    r2 = rt.rm_;
+  } else if (cond != cc_always) {
+    r2 = scratch;
+    li(r2, rt);
+  }
+
+  // shifted_branch_offset is passed directly as an argument to each branch
+  // instruction so that it is evaluated just before the branch is generated,
+  // as required.
+
+  switch (cond) {
+    case cc_always:
+      b(shifted_branch_offset(L, false));
+      break;
+    case eq:
+      beq(rs, r2, shifted_branch_offset(L, false));
+      break;
+    case ne:
+      bne(rs, r2, shifted_branch_offset(L, false));
+      break;
+
+    // Signed comparison
+    case greater:
+      slt(scratch, r2, rs);
+      bne(scratch, zero_reg, shifted_branch_offset(L, false));
+      break;
+    case greater_equal:
+      slt(scratch, rs, r2);
+      beq(scratch, zero_reg, shifted_branch_offset(L, false));
+      break;
+    case less:
+      slt(scratch, rs, r2);
+      bne(scratch, zero_reg, shifted_branch_offset(L, false));
+      break;
+    case less_equal:
+      slt(scratch, r2, rs);
+      beq(scratch, zero_reg, shifted_branch_offset(L, false));
+      break;
+
+    // Unsigned comparison.
+    case Ugreater:
+      sltu(scratch, r2, rs);
+      bne(scratch, zero_reg, shifted_branch_offset(L, false));
+      break;
+    case Ugreater_equal:
+      sltu(scratch, rs, r2);
+      beq(scratch, zero_reg, shifted_branch_offset(L, false));
+      break;
+    case Uless:
+      sltu(scratch, rs, r2);
+      bne(scratch, zero_reg, shifted_branch_offset(L, false));
+      break;
+    case Uless_equal:
+      sltu(scratch, r2, rs);
+      beq(scratch, zero_reg, shifted_branch_offset(L, false));
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+// Trashes the at register if no scratch register is provided.
+// We need bgezal or bltzal, but they cannot consume the result of the slt
+// instructions directly. Using sub or add instead would miss overflow cases,
+// so we keep the slt and insert an intermediate third instruction.
+void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
+                                   const Operand& rt, Register scratch) {
+  Register r2;
+  if (rt.is_reg()) {
+    r2 = rt.rm_;
+  } else if (cond != cc_always) {
+    r2 = scratch;
+    li(r2, rt);
+  }
+
+  switch (cond) {
+    case cc_always:
+      bal(offset);
+      break;
+    case eq:
+      bne(rs, r2, 2);
+      nop();
+      bal(offset);
+      break;
+    case ne:
+      beq(rs, r2, 2);
+      nop();
+      bal(offset);
+      break;
+
+    // Signed comparison
+    case greater:
+      slt(scratch, r2, rs);
+      addiu(scratch, scratch, -1);
+      bgezal(scratch, offset);
+      break;
+    case greater_equal:
+      slt(scratch, rs, r2);
+      addiu(scratch, scratch, -1);
+      bltzal(scratch, offset);
+      break;
+    case less:
+      slt(scratch, rs, r2);
+      addiu(scratch, scratch, -1);
+      bgezal(scratch, offset);
+      break;
+    case less_equal:
+      slt(scratch, r2, rs);
+      addiu(scratch, scratch, -1);
+      bltzal(scratch, offset);
+      break;
+
+    // Unsigned comparison.
+    case Ugreater:
+      sltu(scratch, r2, rs);
+      addiu(scratch, scratch, -1);
+      bgezal(scratch, offset);
+      break;
+    case Ugreater_equal:
+      sltu(scratch, rs, r2);
+      addiu(scratch, scratch, -1);
+      bltzal(scratch, offset);
+      break;
+    case Uless:
+      sltu(scratch, rs, r2);
+      addiu(scratch, scratch, -1);
+      bgezal(scratch, offset);
+      break;
+    case Uless_equal:
+      sltu(scratch, r2, rs);
+      addiu(scratch, scratch, -1);
+      bltzal(scratch, offset);
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+}
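+
+
+// Only bgezal and bltzal can both link and test, and they test against zero.
+// The addiu scratch, scratch, -1 above turns the 0/1 produced by slt/sltu
+// into -1/0, so bgezal fires when the tested predicate was 1 and bltzal
+// fires when it was 0; the slt operand order is swapped per case so that the
+// requested condition maps onto one of the two.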
+
+
+void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs,
+                                   const Operand& rt, Register scratch) {
+  Register r2;
+  if (rt.is_reg()) {
+    r2 = rt.rm_;
+  } else if (cond != cc_always) {
+    r2 = scratch;
+    li(r2, rt);
+  }
+
+  switch (cond) {
+    case cc_always:
+      bal(shifted_branch_offset(L, false));
+      break;
+    case eq:
+      bne(rs, r2, 2);
+      nop();
+      bal(shifted_branch_offset(L, false));
+      break;
+    case ne:
+      beq(rs, r2, 2);
+      nop();
+      bal(shifted_branch_offset(L, false));
+      break;
+
+    // Signed comparison
+    case greater:
+      slt(scratch, r2, rs);
+      addiu(scratch, scratch, -1);
+      bgezal(scratch, shifted_branch_offset(L, false));
+      break;
+    case greater_equal:
+      slt(scratch, rs, r2);
+      addiu(scratch, scratch, -1);
+      bltzal(scratch, shifted_branch_offset(L, false));
+      break;
+    case less:
+      slt(scratch, rs, r2);
+      addiu(scratch, scratch, -1);
+      bgezal(scratch, shifted_branch_offset(L, false));
+      break;
+    case less_equal:
+      slt(scratch, r2, rs);
+      addiu(scratch, scratch, -1);
+      bltzal(scratch, shifted_branch_offset(L, false));
+      break;
+
+    // Unsigned comparison.
+    case Ugreater:
+      sltu(scratch, r2, rs);
+      addiu(scratch, scratch, -1);
+      bgezal(scratch, shifted_branch_offset(L, false));
+      break;
+    case Ugreater_equal:
+      sltu(scratch, rs, r2);
+      addiu(scratch, scratch, -1);
+      bltzal(scratch, shifted_branch_offset(L, false));
+      break;
+    case Uless:
+      sltu(scratch, rs, r2);
+      addiu(scratch, scratch, -1);
+      bgezal(scratch, shifted_branch_offset(L, false));
+      break;
+    case Uless_equal:
+      sltu(scratch, r2, rs);
+      addiu(scratch, scratch, -1);
+      bltzal(scratch, shifted_branch_offset(L, false));
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void MacroAssembler::Jump(const Operand& target,
+                          Condition cond, Register rs, const Operand& rt) {
+  if (target.is_reg()) {
+    if (cond == cc_always) {
+      jr(target.rm());
+    } else {
+      Branch(NegateCondition(cond), 2, rs, rt);
+      nop();
+      jr(target.rm());
+    }
+  } else {    // !target.is_reg()
+    if (!MustUseAt(target.rmode_)) {
+      if (cond == cc_always) {
+        j(target.imm32_);
+      } else {
+        Branch(NegateCondition(cond), 2, rs, rt);
+        nop();
+        j(target.imm32_);  // will generate only one instruction.
+      }
+    } else {  // MustUseAt(target)
+      li(at, target);  // Load the jump target, not the condition operand.
+      if (cond == cc_always) {
+        jr(at);
+      } else {
+        Branch(NegateCondition(cond), 2, rs, rt);
+        nop();
+        jr(at);  // will generate only one instruction.
+      }
+    }
+  }
+}
+
+
+void MacroAssembler::Call(const Operand& target,
+                          Condition cond, Register rs, const Operand& rt) {
+  if (target.is_reg()) {
+    if (cond == cc_always) {
+      jalr(target.rm());
+    } else {
+      Branch(NegateCondition(cond), 2, rs, rt);
+      nop();
+      jalr(target.rm());
+    }
+  } else {    // !target.is_reg()
+    if (!MustUseAt(target.rmode_)) {
+      if (cond == cc_always) {
+        jal(target.imm32_);
+      } else {
+        Branch(NegateCondition(cond), 2, rs, rt);
+        nop();
+        jal(target.imm32_);  // will generate only one instruction.
+      }
+    } else {  // MustUseAt(target)
+      li(at, target);  // Load the call target, not the condition operand.
+      if (cond == cc_always) {
+        jalr(at);
+      } else {
+        Branch(NegateCondition(cond), 2, rs, rt);
+        nop();
+        jalr(at);  // will generate only one instruction.
+      }
+    }
+  }
+}
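+
+
+// Conditional jumps and calls above are synthesized by branching over the
+// j/jal (or jr/jalr) on the negated condition, with a nop in the branch
+// delay slot. For example, Jump(Operand(t9), eq, a0, Operand(zero_reg))
+// emits roughly:
+//   bne  a0, zero_reg, 2    // skip the jump when a0 != 0
+//   nop
+//   jr   t9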
+
+void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::Drop(int count, Condition cond) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::Call(Label* target) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// ---------------------------------------------------------------------------
+// Exception handling
+
+void MacroAssembler::PushTryHandler(CodeLocation try_location,
+                                    HandlerType type) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::PopTryHandler() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+
+// ---------------------------------------------------------------------------
+// Activation frames
+
+void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
+                              Register r1, const Operand& r2) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::StubReturn(int argc) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
+                                     int num_arguments,
+                                     int result_size) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
+                                            bool* resolved) {
+  UNIMPLEMENTED_MIPS();
+  return Handle<Code>(reinterpret_cast<Code*>(NULL));   // UNIMPLEMENTED RETURN
+}
+
+
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
+                                   InvokeJSFlags flags) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::SetCounter(StatsCounter* counter, int value,
+                                Register scratch1, Register scratch2) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
+                                      Register scratch1, Register scratch2) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
+                                      Register scratch1, Register scratch2) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+
+void MacroAssembler::Assert(Condition cc, const char* msg,
+                            Register rs, Operand rt) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::Check(Condition cc, const char* msg,
+                           Register rs, Operand rt) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::Abort(const char* msg) {
+  UNIMPLEMENTED_MIPS();
+}
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
new file mode 100644
index 0000000..aea9836
--- /dev/null
+++ b/src/mips/macro-assembler-mips.h
@@ -0,0 +1,381 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
+#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
+
+#include "assembler.h"
+#include "mips/assembler-mips.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declaration.
+class JumpTarget;
+
+// The at register is used internally for instruction generation, so it is not
+// safe to use unless we know exactly what we are doing.
+
+// Register aliases.
+const Register cp = s7;     // JavaScript context pointer
+const Register fp = s8_fp;  // Alias fp
+
+enum InvokeJSFlags {
+  CALL_JS,
+  JUMP_JS
+};
+
+// MacroAssembler implements a collection of frequently used macros.
+class MacroAssembler: public Assembler {
+ public:
+  MacroAssembler(void* buffer, int size);
+
+  // Jump, Call, and Ret pseudo instructions implementing inter-working.
+  void Jump(const Operand& target,
+            Condition cond = cc_always,
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  void Call(const Operand& target,
+            Condition cond = cc_always,
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  void Jump(Register target,
+            Condition cond = cc_always,
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  void Jump(byte* target, RelocInfo::Mode rmode,
+            Condition cond = cc_always,
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  void Jump(Handle<Code> code, RelocInfo::Mode rmode,
+            Condition cond = cc_always,
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  void Call(Register target,
+            Condition cond = cc_always,
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  void Call(byte* target, RelocInfo::Mode rmode,
+            Condition cond = cc_always,
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  void Call(Handle<Code> code, RelocInfo::Mode rmode,
+            Condition cond = cc_always,
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  void Ret(Condition cond = cc_always,
+           Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  void Branch(Condition cond, int16_t offset, Register rs = zero_reg,
+              const Operand& rt = Operand(zero_reg), Register scratch = at);
+  void Branch(Condition cond, Label* L, Register rs = zero_reg,
+              const Operand& rt = Operand(zero_reg), Register scratch = at);
+  // Conditional branch and link.
+  void BranchAndLink(Condition cond, int16_t offset, Register rs = zero_reg,
+                     const Operand& rt = Operand(zero_reg),
+                     Register scratch = at);
+  void BranchAndLink(Condition cond, Label* L, Register rs = zero_reg,
+                     const Operand& rt = Operand(zero_reg),
+                     Register scratch = at);
+
+  // Emit code to discard a non-negative number of pointer-sized elements
+  // from the stack, clobbering only the sp register.
+  void Drop(int count, Condition cond = cc_always);
+
+  void Call(Label* target);
+
+  // Jump unconditionally to the given label.
+  // We NEED a nop in the branch delay slot, as it is used by v8, for example
+  // in CodeGenerator::ProcessDeferred().
+  // Use b(Label) instead for code generation.
+  void jmp(Label* L) {
+    Branch(cc_always, L);
+    nop();
+  }
+
+  // Load an object from the root table.
+  void LoadRoot(Register destination,
+                Heap::RootListIndex index);
+  void LoadRoot(Register destination,
+                Heap::RootListIndex index,
+                Condition cond, Register src1, const Operand& src2);
+
+  // Sets the remembered set bit for [address+offset], where address is the
+  // address of the heap object 'object'.  The address must be in the first 8K
+  // of an allocated page. The 'scratch' register is used in the
+  // implementation and all 3 registers are clobbered by the operation, as
+  // well as the at register.
+  void RecordWrite(Register object, Register offset, Register scratch);
+
+
+  // ---------------------------------------------------------------------------
+  // Instruction macros
+
+#define DEFINE_INSTRUCTION(instr)                                       \
+  void instr(Register rd, Register rs, const Operand& rt);                     \
+  void instr(Register rd, Register rs, Register rt) {                          \
+    instr(rd, rs, Operand(rt));                                                \
+  }                                                                            \
+  void instr(Register rs, Register rt, int32_t j) {                            \
+    instr(rs, rt, Operand(j));                                                 \
+  }
+
+#define DEFINE_INSTRUCTION2(instr)                                      \
+  void instr(Register rs, const Operand& rt);                                  \
+  void instr(Register rs, Register rt) {                                       \
+    instr(rs, Operand(rt));                                                    \
+  }                                                                            \
+  void instr(Register rs, int32_t j) {                                         \
+    instr(rs, Operand(j));                                                     \
+  }
+
+  DEFINE_INSTRUCTION(Add);
+  DEFINE_INSTRUCTION(Addu);
+  DEFINE_INSTRUCTION(Mul);
+  DEFINE_INSTRUCTION2(Mult);
+  DEFINE_INSTRUCTION2(Multu);
+  DEFINE_INSTRUCTION2(Div);
+  DEFINE_INSTRUCTION2(Divu);
+
+  DEFINE_INSTRUCTION(And);
+  DEFINE_INSTRUCTION(Or);
+  DEFINE_INSTRUCTION(Xor);
+  DEFINE_INSTRUCTION(Nor);
+
+  DEFINE_INSTRUCTION(Slt);
+  DEFINE_INSTRUCTION(Sltu);
+
+#undef DEFINE_INSTRUCTION
+#undef DEFINE_INSTRUCTION2
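+
+  // Each DEFINE_INSTRUCTION above declares the Operand form (implemented in
+  // macro-assembler-mips.cc) and generates inline register and immediate
+  // overloads that simply wrap their last argument in an Operand; for
+  // example, Addu(v0, a0, 8) forwards to Addu(v0, a0, Operand(8)).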
+
+
+  //------------Pseudo-instructions-------------
+
+  void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
+  // Move the logical ones complement of source to dest.
+  void movn(Register rd, Register rt);
+
+
+  // Load an int32 immediate into the rd register.
+  void li(Register rd, Operand j, bool gen2instr = false);
+  inline void li(Register rd, int32_t j, bool gen2instr = false) {
+    li(rd, Operand(j), gen2instr);
+  }
+
+  // Exception-generating instructions and debugging support
+  void stop(const char* msg);
+
+
+  // Push multiple registers on the stack.
+  // With MultiPush, lower registers are pushed first on the stack.
+  // For example if you push t0, t1, s0, and ra you get:
+  // |                       |   higher addresses
+  // |-----------------------|
+  // |         t0            |         +
+  // |-----------------------|         |
+  // |         t1            |         |
+  // |-----------------------|         |
+  // |         s0            |         v
+  // |-----------------------|         -
+  // |         ra            |
+  // |-----------------------|
+  // |                       |   lower addresses
+  void MultiPush(RegList regs);
+  void MultiPushReversed(RegList regs);
+  void Push(Register src) {
+    Addu(sp, sp, Operand(-kPointerSize));
+    sw(src, MemOperand(sp, 0));
+  }
+  inline void push(Register src) { Push(src); }
+
+  void Push(Register src, Condition cond, Register tst1, Register tst2) {
+    // Since we don't have conditional execution we use a Branch.
+    Branch(cond, 3, tst1, Operand(tst2));
+    nop();
+    Addu(sp, sp, Operand(-kPointerSize));
+    sw(src, MemOperand(sp, 0));
+  }
+
+  // Pops multiple values from the stack and load them in the
+  // registers specified in regs. Pop order is the opposite as in MultiPush.
+  void MultiPop(RegList regs);
+  void MultiPopReversed(RegList regs);
+  void Pop(Register dst) {
+    lw(dst, MemOperand(sp, 0));
+    Addu(sp, sp, Operand(kPointerSize));
+  }
+  void Pop() {
+    Add(sp, sp, Operand(kPointerSize));
+  }
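+
+  // MIPS has no dedicated push/pop instructions, so Push stores through sw at
+  // the freshly decremented sp, and Pop loads from the current top of stack
+  // before incrementing sp.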
+
+
+  // ---------------------------------------------------------------------------
+  // Exception handling
+
+  // Push a new try handler and link into try handler chain.
+  // The return address must be passed in the ra register.
+  // On exit, v0 contains TOS (code slot).
+  void PushTryHandler(CodeLocation try_location, HandlerType type);
+
+  // Unlink the stack handler on top of the stack from the try handler chain.
+  // Must preserve the result register.
+  void PopTryHandler();
+
+
+  // ---------------------------------------------------------------------------
+  // Support functions.
+
+  inline void BranchOnSmi(Register value, Label* smi_label,
+                          Register scratch = at) {
+    ASSERT_EQ(0, kSmiTag);
+    andi(scratch, value, kSmiTagMask);
+    Branch(eq, smi_label, scratch, Operand(zero_reg));
+  }
+
+
+  inline void BranchOnNotSmi(Register value, Label* not_smi_label,
+                             Register scratch = at) {
+    ASSERT_EQ(0, kSmiTag);
+    andi(scratch, value, kSmiTagMask);
+    Branch(ne, not_smi_label, scratch, Operand(zero_reg));
+  }
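+
+  // Smis carry a zero tag bit (kSmiTag == 0, kSmiTagMask == 1), so the two
+  // helpers above reduce to an andi with the tag mask followed by a branch
+  // on (non-)zero. A usage sketch, assuming a label named slow_case:
+  //   __ BranchOnNotSmi(a0, &slow_case);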
+
+
+  // ---------------------------------------------------------------------------
+  // Runtime calls
+
+  // Call a code stub.
+  void CallStub(CodeStub* stub, Condition cond = cc_always,
+                Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  void CallJSExitStub(CodeStub* stub);
+
+  // Return from a code stub after popping its arguments.
+  void StubReturn(int argc);
+
+  // Call a runtime routine.
+  // Eventually this should be used for all C calls.
+  void CallRuntime(Runtime::Function* f, int num_arguments);
+
+  // Convenience function: Same as above, but takes the fid instead.
+  void CallRuntime(Runtime::FunctionId fid, int num_arguments);
+
+  // Tail call of a runtime routine (jump).
+  // Like JumpToRuntime, but also takes care of passing the number
+  // of parameters.
+  void TailCallRuntime(const ExternalReference& ext,
+                       int num_arguments,
+                       int result_size);
+
+  // Jump to the builtin routine.
+  void JumpToRuntime(const ExternalReference& builtin);
+
+  // Invoke specified builtin JavaScript function. Adds an entry to
+  // the unresolved list if the name does not resolve.
+  void InvokeBuiltin(Builtins::JavaScript id, InvokeJSFlags flags);
+
+  // Store the code object for the given builtin in the target register and
+  // set up the function in a1.
+  void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+
+  struct Unresolved {
+    int pc;
+    uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.
+    const char* name;
+  };
+  List<Unresolved>* unresolved() { return &unresolved_; }
+
+  Handle<Object> CodeObject() { return code_object_; }
+
+
+  // ---------------------------------------------------------------------------
+  // Stack limit support
+
+  void StackLimitCheck(Label* on_stack_limit_hit);
+
+
+  // ---------------------------------------------------------------------------
+  // StatsCounter support
+
+  void SetCounter(StatsCounter* counter, int value,
+                  Register scratch1, Register scratch2);
+  void IncrementCounter(StatsCounter* counter, int value,
+                        Register scratch1, Register scratch2);
+  void DecrementCounter(StatsCounter* counter, int value,
+                        Register scratch1, Register scratch2);
+
+
+  // ---------------------------------------------------------------------------
+  // Debugging
+
+  // Calls Abort(msg) if the condition cc is not satisfied.
+  // Use --debug_code to enable.
+  void Assert(Condition cc, const char* msg, Register rs, Operand rt);
+
+  // Like Assert(), but always enabled.
+  void Check(Condition cc, const char* msg, Register rs, Operand rt);
+
+  // Print a message to stdout and abort execution.
+  void Abort(const char* msg);
+
+  // Verify restrictions about code generated in stubs.
+  void set_generating_stub(bool value) { generating_stub_ = value; }
+  bool generating_stub() { return generating_stub_; }
+  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
+  bool allow_stub_calls() { return allow_stub_calls_; }
+
+ private:
+  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+
+  // Get the code for the given builtin. Reports through the 'resolved' flag
+  // whether the function could be resolved.
+  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
+
+  List<Unresolved> unresolved_;
+  bool generating_stub_;
+  bool allow_stub_calls_;
+  // This handle will be patched with the code object on installation.
+  Handle<Object> code_object_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+// Generate a MemOperand for loading a field from an object.
+static inline MemOperand FieldMemOperand(Register object, int offset) {
+  return MemOperand(object, offset - kHeapObjectTag);
+}
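+
+
+// Heap object pointers are tagged (kHeapObjectTag == 1), so field accesses go
+// through this untagging offset. A sketch of loading an object's map:
+//   __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+// reads the word at a0 + HeapObject::kMapOffset - kHeapObjectTag.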
+
+
+
+#ifdef GENERATED_CODE_COVERAGE
+#define CODE_COVERAGE_STRINGIFY(x) #x
+#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
+#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
+#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
+#else
+#define ACCESS_MASM(masm) masm->
+#endif
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
+
diff --git a/src/mips/register-allocator-mips-inl.h b/src/mips/register-allocator-mips-inl.h
new file mode 100644
index 0000000..a876bee
--- /dev/null
+++ b/src/mips/register-allocator-mips-inl.h
@@ -0,0 +1,137 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MIPS_REGISTER_ALLOCATOR_MIPS_INL_H_
+#define V8_MIPS_REGISTER_ALLOCATOR_MIPS_INL_H_
+
+#include "v8.h"
+#include "mips/assembler-mips.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+bool RegisterAllocator::IsReserved(Register reg) {
+  // The code for this test relies on the order of register codes.
+  return reg.is(cp) || reg.is(s8_fp) || reg.is(sp);
+}
+
+
+int RegisterAllocator::ToNumber(Register reg) {
+  ASSERT(reg.is_valid() && !IsReserved(reg));
+  const int kNumbers[] = {
+    0,    // zero_reg
+    1,    // at
+    2,    // v0
+    3,    // v1
+    4,    // a0
+    5,    // a1
+    6,    // a2
+    7,    // a3
+    8,    // t0
+    9,    // t1
+    10,   // t2
+    11,   // t3
+    12,   // t4
+    13,   // t5
+    14,   // t6
+    15,   // t7
+    16,   // s0
+    17,   // s1
+    18,   // s2
+    19,   // s3
+    20,   // s4
+    21,   // s5
+    22,   // s6
+    23,   // s7
+    24,   // t8
+    25,   // t9
+    26,   // k0
+    27,   // k1
+    28,   // gp
+    29,   // sp
+    30,   // s8_fp
+    31,   // ra
+  };
+  return kNumbers[reg.code()];
+}
+
+
+Register RegisterAllocator::ToRegister(int num) {
+  ASSERT(num >= 0 && num < kNumRegisters);
+  const Register kRegisters[] = {
+    zero_reg,
+    at,
+    v0,
+    v1,
+    a0,
+    a1,
+    a2,
+    a3,
+    t0,
+    t1,
+    t2,
+    t3,
+    t4,
+    t5,
+    t6,
+    t7,
+    s0,
+    s1,
+    s2,
+    s3,
+    s4,
+    s5,
+    s6,
+    s7,
+    t8,
+    t9,
+    k0,
+    k1,
+    gp,
+    sp,
+    s8_fp,
+    ra
+  };
+  return kRegisters[num];
+}
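+
+
+// ToNumber and ToRegister are intended to be inverse mappings; both tables
+// follow the hardware register encoding (zero_reg == 0 through ra == 31).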
+
+
+void RegisterAllocator::Initialize() {
+  Reset();
+  // The non-reserved a1 and ra registers are live on JS function entry.
+  Use(a1);  // JS function.
+  Use(ra);  // Return address.
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_REGISTER_ALLOCATOR_MIPS_INL_H_
+
diff --git a/src/mips/register-allocator-mips.cc b/src/mips/register-allocator-mips.cc
new file mode 100644
index 0000000..f48d3a6
--- /dev/null
+++ b/src/mips/register-allocator-mips.cc
@@ -0,0 +1,60 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// Result implementation.
+
+void Result::ToRegister() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Result::ToRegister(Register target) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
+  // No byte registers on MIPS.
+  UNREACHABLE();
+  return Result();
+}
+
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/register-allocator-mips.h b/src/mips/register-allocator-mips.h
new file mode 100644
index 0000000..e056fb8
--- /dev/null
+++ b/src/mips/register-allocator-mips.h
@@ -0,0 +1,46 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
+#define V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
+
+#include "mips/constants-mips.h"
+
+namespace v8 {
+namespace internal {
+
+class RegisterAllocatorConstants : public AllStatic {
+ public:
+  static const int kNumRegisters = assembler::mips::kNumRegisters;
+  static const int kInvalidRegister = assembler::mips::kInvalidRegister;
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
+
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
new file mode 100644
index 0000000..2e2dc86
--- /dev/null
+++ b/src/mips/simulator-mips.cc
@@ -0,0 +1,1648 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+#include <cstdarg>
+#include "v8.h"
+
+#include "disasm.h"
+#include "assembler.h"
+#include "globals.h"    // Need the bit_cast
+#include "mips/constants-mips.h"
+#include "mips/simulator-mips.h"
+
+namespace v8i = v8::internal;
+
+#if !defined(__mips)
+
+// Only build the simulator if not compiling for real MIPS hardware.
+namespace assembler {
+namespace mips {
+
+using ::v8::internal::Object;
+using ::v8::internal::PrintF;
+using ::v8::internal::OS;
+using ::v8::internal::ReadLine;
+using ::v8::internal::DeleteArray;
+
+// Utility functions.
+bool HaveSameSign(int32_t a, int32_t b) {
+  return ((a ^ b) >= 0);
+}
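+
+
+// The xor leaves the sign bit clear exactly when a and b have equal sign
+// bits, so a single comparison against zero suffices (two equal values are
+// treated as having the same sign).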
+
+
+// This macro provides a platform independent use of sscanf. The reason for
+// SScanF not being implemented in a platform independent way through
+// ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time
+// Library does not provide vsscanf.
+#define SScanF sscanf  // NOLINT
+
+// The Debugger class is used by the simulator while debugging simulated MIPS
+// code.
+class Debugger {
+ public:
+  explicit Debugger(Simulator* sim);
+  ~Debugger();
+
+  void Stop(Instruction* instr);
+  void Debug();
+
+ private:
+  // We set the breakpoint code to 0xfffff to easily recognize it.
+  static const Instr kBreakpointInstr = SPECIAL | BREAK | 0xfffff << 6;
+  static const Instr kNopInstr =  0x0;
+
+  Simulator* sim_;
+
+  int32_t GetRegisterValue(int regnum);
+  bool GetValue(const char* desc, int32_t* value);
+
+  // Set or delete a breakpoint. Returns true if successful.
+  bool SetBreakpoint(Instruction* breakpc);
+  bool DeleteBreakpoint(Instruction* breakpc);
+
+  // Undo and redo all breakpoints. This is needed to bracket disassembly and
+  // execution to skip past breakpoints when run from the debugger.
+  void UndoBreakpoints();
+  void RedoBreakpoints();
+
+  // Print all registers with a nice formatting.
+  void PrintAllRegs();
+};
+
+Debugger::Debugger(Simulator* sim) {
+  sim_ = sim;
+}
+
+Debugger::~Debugger() {
+}
+
+#ifdef GENERATED_CODE_COVERAGE
+static FILE* coverage_log = NULL;
+
+
+static void InitializeCoverage() {
+  char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
+  if (file_name != NULL) {
+    coverage_log = fopen(file_name, "aw+");
+  }
+}
+
+
+void Debugger::Stop(Instruction* instr) {
+  UNIMPLEMENTED_MIPS();
+  char* str = reinterpret_cast<char*>(instr->InstructionBits());
+  if (strlen(str) > 0) {
+    if (coverage_log != NULL) {
+      fprintf(coverage_log, "%s\n", str);
+      fflush(coverage_log);
+    }
+    instr->SetInstructionBits(0x0);  // Overwrite with nop.
+  }
+  sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
+}
+
+#else  // ndef GENERATED_CODE_COVERAGE
+
+#define UNSUPPORTED() printf("Unsupported instruction.\n");
+
+static void InitializeCoverage() {}
+
+
+void Debugger::Stop(Instruction* instr) {
+  const char* str = reinterpret_cast<char*>(instr->InstructionBits());
+  PrintF("Simulator hit %s\n", str);
+  sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
+  Debug();
+}
+#endif  // def GENERATED_CODE_COVERAGE
+
+
+int32_t Debugger::GetRegisterValue(int regnum) {
+  if (regnum == kNumSimuRegisters) {
+    return sim_->get_pc();
+  } else {
+    return sim_->get_register(regnum);
+  }
+}
+
+
+bool Debugger::GetValue(const char* desc, int32_t* value) {
+  int regnum = Registers::Number(desc);
+  if (regnum != kInvalidRegister) {
+    *value = GetRegisterValue(regnum);
+    return true;
+  } else {
+    return SScanF(desc, "%i", value) == 1;
+  }
+  return false;
+}
+
+
+bool Debugger::SetBreakpoint(Instruction* breakpc) {
+  // Check if a breakpoint can be set. If not return without any side-effects.
+  if (sim_->break_pc_ != NULL) {
+    return false;
+  }
+
+  // Set the breakpoint.
+  sim_->break_pc_ = breakpc;
+  sim_->break_instr_ = breakpc->InstructionBits();
+  // Not setting the breakpoint instruction in the code itself. It will be set
+  // when the debugger shell continues.
+  return true;
+}
+
+
+bool Debugger::DeleteBreakpoint(Instruction* breakpc) {
+  if (sim_->break_pc_ != NULL) {
+    sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+  }
+
+  sim_->break_pc_ = NULL;
+  sim_->break_instr_ = 0;
+  return true;
+}
+
+
+void Debugger::UndoBreakpoints() {
+  if (sim_->break_pc_ != NULL) {
+    sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+  }
+}
+
+
+void Debugger::RedoBreakpoints() {
+  if (sim_->break_pc_ != NULL) {
+    sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
+  }
+}
+
+void Debugger::PrintAllRegs() {
+#define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n)
+
+  PrintF("\n");
+  // at, v0, a0
+  PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
+         REG_INFO(1), REG_INFO(2), REG_INFO(4));
+  // v1, a1
+  PrintF("%26s\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
+         "", REG_INFO(3), REG_INFO(5));
+  // a2
+  PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(6));
+  // a3
+  PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(7));
+  PrintF("\n");
+  // t0-t7, s0-s7
+  for (int i = 0; i < 8; i++) {
+    PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
+           REG_INFO(8+i), REG_INFO(16+i));
+  }
+  PrintF("\n");
+  // t8, k0, LO
+  PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
+         REG_INFO(24), REG_INFO(26), REG_INFO(32));
+  // t9, k1, HI
+  PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
+         REG_INFO(25), REG_INFO(27), REG_INFO(33));
+  // sp, fp, gp
+  PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
+         REG_INFO(29), REG_INFO(30), REG_INFO(28));
+  // pc
+  PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
+         REG_INFO(31), REG_INFO(34));
+#undef REG_INFO
+}
+
+void Debugger::Debug() {
+  intptr_t last_pc = -1;
+  bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+  char cmd[COMMAND_SIZE + 1];
+  char arg1[ARG_SIZE + 1];
+  char arg2[ARG_SIZE + 1];
+
+  // Make sure to have a proper terminating character if reaching the limit.
+  cmd[COMMAND_SIZE] = 0;
+  arg1[ARG_SIZE] = 0;
+  arg2[ARG_SIZE] = 0;
+
+  // Undo all set breakpoints while running in the debugger shell. This will
+  // make them invisible to all commands.
+  UndoBreakpoints();
+
+  while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
+    if (last_pc != sim_->get_pc()) {
+      disasm::NameConverter converter;
+      disasm::Disassembler dasm(converter);
+      // use a reasonably large buffer
+      v8::internal::EmbeddedVector<char, 256> buffer;
+      dasm.InstructionDecode(buffer,
+                             reinterpret_cast<byte_*>(sim_->get_pc()));
+      PrintF("  0x%08x  %s\n", sim_->get_pc(), buffer.start());
+      last_pc = sim_->get_pc();
+    }
+    char* line = ReadLine("sim> ");
+    if (line == NULL) {
+      break;
+    } else {
+      // Use sscanf to parse the individual parts of the command line. At the
+      // moment no command expects more than two parameters.
+      int args = SScanF(line,
+                        "%" XSTR(COMMAND_SIZE) "s "
+                        "%" XSTR(ARG_SIZE) "s "
+                        "%" XSTR(ARG_SIZE) "s",
+                        cmd, arg1, arg2);
+      if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+        if (!(reinterpret_cast<Instruction*>(sim_->get_pc())->IsTrap())) {
+          sim_->InstructionDecode(
+                                reinterpret_cast<Instruction*>(sim_->get_pc()));
+        } else {
+          // Allow si to jump over generated breakpoints.
+          PrintF("/!\\ Jumping over generated breakpoint.\n");
+          sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
+        }
+      } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+        // Execute the one instruction we broke at with breakpoints disabled.
+        sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
+        // Leave the debugger shell.
+        done = true;
+      } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+        if (args == 2) {
+          int32_t value;
+          if (strcmp(arg1, "all") == 0) {
+            PrintAllRegs();
+          } else {
+            if (GetValue(arg1, &value)) {
+              PrintF("%s: 0x%08x %d \n", arg1, value, value);
+            } else {
+              PrintF("%s unrecognized\n", arg1);
+            }
+          }
+        } else {
+          PrintF("print <register>\n");
+        }
+      } else if ((strcmp(cmd, "po") == 0)
+                 || (strcmp(cmd, "printobject") == 0)) {
+        if (args == 2) {
+          int32_t value;
+          if (GetValue(arg1, &value)) {
+            Object* obj = reinterpret_cast<Object*>(value);
+            PrintF("%s: \n", arg1);
+#ifdef DEBUG
+            obj->PrintLn();
+#else
+            obj->ShortPrint();
+            PrintF("\n");
+#endif
+          } else {
+            PrintF("%s unrecognized\n", arg1);
+          }
+        } else {
+          PrintF("printobject <value>\n");
+        }
+      } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0)) {
+        disasm::NameConverter converter;
+        disasm::Disassembler dasm(converter);
+        // use a reasonably large buffer
+        v8::internal::EmbeddedVector<char, 256> buffer;
+
+        byte_* cur = NULL;
+        byte_* end = NULL;
+
+        if (args == 1) {
+          cur = reinterpret_cast<byte_*>(sim_->get_pc());
+          end = cur + (10 * Instruction::kInstructionSize);
+        } else if (args == 2) {
+          int32_t value;
+          if (GetValue(arg1, &value)) {
+            cur = reinterpret_cast<byte_*>(value);
+            // no length parameter passed, assume 10 instructions
+            end = cur + (10 * Instruction::kInstructionSize);
+          }
+        } else {
+          int32_t value1;
+          int32_t value2;
+          if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+            cur = reinterpret_cast<byte_*>(value1);
+            end = cur + (value2 * Instruction::kInstructionSize);
+          }
+        }
+
+        while (cur < end) {
+          dasm.InstructionDecode(buffer, cur);
+          PrintF("  0x%08x  %s\n", cur, buffer.start());
+          cur += Instruction::kInstructionSize;
+        }
+      } else if (strcmp(cmd, "gdb") == 0) {
+        PrintF("relinquishing control to gdb\n");
+        v8::internal::OS::DebugBreak();
+        PrintF("regaining control from gdb\n");
+      } else if (strcmp(cmd, "break") == 0) {
+        if (args == 2) {
+          int32_t value;
+          if (GetValue(arg1, &value)) {
+            if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
+              PrintF("setting breakpoint failed\n");
+            }
+          } else {
+            PrintF("%s unrecognized\n", arg1);
+          }
+        } else {
+          PrintF("break <address>\n");
+        }
+      } else if (strcmp(cmd, "del") == 0) {
+        if (!DeleteBreakpoint(NULL)) {
+          PrintF("deleting breakpoint failed\n");
+        }
+      } else if (strcmp(cmd, "flags") == 0) {
+        PrintF("No flags on MIPS!\n");
+      } else if (strcmp(cmd, "unstop") == 0) {
+        PrintF("Unstop command not implemented on MIPS.\n");
+      } else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) {
+        // Print registers and disassemble
+        PrintAllRegs();
+        PrintF("\n");
+
+        disasm::NameConverter converter;
+        disasm::Disassembler dasm(converter);
+        // use a reasonably large buffer
+        v8::internal::EmbeddedVector<char, 256> buffer;
+
+        byte_* cur = NULL;
+        byte_* end = NULL;
+
+        if (args == 1) {
+          cur = reinterpret_cast<byte_*>(sim_->get_pc());
+          end = cur + (10 * Instruction::kInstructionSize);
+        } else if (args == 2) {
+          int32_t value;
+          if (GetValue(arg1, &value)) {
+            cur = reinterpret_cast<byte_*>(value);
+            // no length parameter passed, assume 10 instructions
+            end = cur + (10 * Instruction::kInstructionSize);
+          }
+        } else {
+          int32_t value1;
+          int32_t value2;
+          if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+            cur = reinterpret_cast<byte_*>(value1);
+            end = cur + (value2 * Instruction::kInstructionSize);
+          }
+        }
+
+        while (cur < end) {
+          dasm.InstructionDecode(buffer, cur);
+          PrintF("  0x%08x  %s\n", cur, buffer.start());
+          cur += Instruction::kInstructionSize;
+        }
+      } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+        PrintF("cont\n");
+        PrintF("  continue execution (alias 'c')\n");
+        PrintF("stepi\n");
+        PrintF("  step one instruction (alias 'si')\n");
+        PrintF("print <register>\n");
+        PrintF("  print register content (alias 'p')\n");
+        PrintF("  use register name 'all' to print all registers\n");
+        PrintF("printobject <register>\n");
+        PrintF("  print an object from a register (alias 'po')\n");
+        PrintF("flags\n");
+        PrintF("  print flags\n");
+        PrintF("disasm [<instructions>]\n");
+        PrintF("disasm [[<address>] <instructions>]\n");
+        PrintF("  disassemble code, default is 10 instructions from pc\n");
+        PrintF("gdb\n");
+        PrintF("  enter gdb\n");
+        PrintF("break <address>\n");
+        PrintF("  set a break point on the address\n");
+        PrintF("del\n");
+        PrintF("  delete the breakpoint\n");
+        PrintF("unstop\n");
+        PrintF("  ignore the stop instruction at the current location");
+        PrintF(" from now on\n");
+      } else {
+        PrintF("Unknown command: %s\n", cmd);
+      }
+    }
+    DeleteArray(line);
+  }
+
+  // Add all the breakpoints back to stop execution and enter the debugger
+  // shell when hit.
+  RedoBreakpoints();
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+
+// Create one simulator per thread and keep it in thread local storage.
+static v8::internal::Thread::LocalStorageKey simulator_key;
+
+
+bool Simulator::initialized_ = false;
+
+
+void Simulator::Initialize() {
+  if (initialized_) return;
+  simulator_key = v8::internal::Thread::CreateThreadLocalKey();
+  initialized_ = true;
+  ::v8::internal::ExternalReference::set_redirector(&RedirectExternalReference);
+}
+
+
+Simulator::Simulator() {
+  Initialize();
+  // Set up simulator support first. Some of this information is needed to
+  // set up the architecture state.
+  size_t stack_size = 1 * 1024 * 1024;  // Allocate 1MB for the stack.
+  stack_ = reinterpret_cast<char*>(malloc(stack_size));
+  pc_modified_ = false;
+  icount_ = 0;
+  break_pc_ = NULL;
+  break_instr_ = 0;
+
+  // Setup architecture state.
+  // All registers are initialized to zero to start with.
+  for (int i = 0; i < kNumSimuRegisters; i++) {
+    registers_[i] = 0;
+  }
+
+  // The sp is initialized to point to the bottom (high address) of the
+  // allocated stack area. To guard against potential stack underflows we
+  // leave some buffer below.
+  registers_[sp] = reinterpret_cast<int32_t>(stack_) + stack_size - 64;
+  // The ra and pc are initialized to a known bad value that will cause an
+  // access violation if the simulator ever tries to execute it.
+  registers_[pc] = bad_ra;
+  registers_[ra] = bad_ra;
+  InitializeCoverage();
+}
+
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator.  The external reference will be a function compiled for the
+// host architecture.  We need to call that function instead of trying to
+// execute it with the simulator.  We do that by redirecting the external
+// reference to a swi (software-interrupt) instruction that is handled by
+// the simulator.  We write the original destination of the jump just at a known
+// offset from the swi instruction so the simulator knows what to call.
+class Redirection {
+ public:
+  Redirection(void* external_function, bool fp_return)
+      : external_function_(external_function),
+        swi_instruction_(rtCallRedirInstr),
+        fp_return_(fp_return),
+        next_(list_) {
+    list_ = this;
+  }
+
+  void* address_of_swi_instruction() {
+    return reinterpret_cast<void*>(&swi_instruction_);
+  }
+
+  void* external_function() { return external_function_; }
+  bool fp_return() { return fp_return_; }
+
+  static Redirection* Get(void* external_function, bool fp_return) {
+    Redirection* current;
+    for (current = list_; current != NULL; current = current->next_) {
+      if (current->external_function_ == external_function) return current;
+    }
+    return new Redirection(external_function, fp_return);
+  }
+
+  static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
+    char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
+    char* addr_of_redirection =
+        addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
+    return reinterpret_cast<Redirection*>(addr_of_redirection);
+  }
+
+ private:
+  void* external_function_;
+  uint32_t swi_instruction_;
+  bool fp_return_;
+  Redirection* next_;
+  static Redirection* list_;
+};
+
+
+Redirection* Redirection::list_ = NULL;
+
+
+void* Simulator::RedirectExternalReference(void* external_function,
+                                           bool fp_return) {
+  Redirection* redirection = Redirection::Get(external_function, fp_return);
+  return redirection->address_of_swi_instruction();
+}
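+
+// Illustrative sketch (not part of this patch): generated code never jumps to
+// a host C++ function directly. The redirector installed in Initialize() maps
+// every external reference onto the swi_instruction_ field of a Redirection,
+// which holds rtCallRedirInstr. Conceptually:
+//
+//   // 'DoRuntimeWork' stands for a hypothetical host runtime function.
+//   void* stub = RedirectExternalReference(
+//       reinterpret_cast<void*>(&DoRuntimeWork), false);
+//   // 'stub' points at rtCallRedirInstr; when the simulated pc reaches it,
+//   // SoftwareInterrupt() recovers the Redirection via FromSwiInstruction()
+//   // and calls DoRuntimeWork natively, leaving the result in v0/v1.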
+
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::current() {
+  Initialize();
+  Simulator* sim = reinterpret_cast<Simulator*>(
+      v8::internal::Thread::GetThreadLocal(simulator_key));
+  if (sim == NULL) {
+    // TODO(146): delete the simulator object when a thread goes away.
+    sim = new Simulator();
+    v8::internal::Thread::SetThreadLocal(simulator_key, sim);
+  }
+  return sim;
+}
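+
+// A minimal usage sketch (illustrative only, assuming a caller outside this
+// namespace): every native thread gets its own Simulator through current(),
+// so register state is never shared between threads.
+//
+//   assembler::mips::Simulator* sim = assembler::mips::Simulator::current();
+//   sim->set_register(assembler::mips::Simulator::a0, 42);
+//   CHECK_EQ(42, sim->get_register(assembler::mips::Simulator::a0));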
+
+
+// Sets the register in the architecture state. It will also deal with updating
+// Simulator internal state for special registers such as PC.
+void Simulator::set_register(int reg, int32_t value) {
+  ASSERT((reg >= 0) && (reg < kNumSimuRegisters));
+  if (reg == pc) {
+    pc_modified_ = true;
+  }
+
+  // The zero register always holds 0.
+  registers_[reg] = (reg == 0) ? 0 : value;
+}
+
+void Simulator::set_fpu_register(int fpureg, int32_t value) {
+  ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+  FPUregisters_[fpureg] = value;
+}
+
+void Simulator::set_fpu_register_double(int fpureg, double value) {
+  ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
+  *v8i::bit_cast<double*, int32_t*>(&FPUregisters_[fpureg]) = value;
+}
+
+
+// Get the register from the architecture state. This function does handle
+// the special case of accessing the PC register.
+int32_t Simulator::get_register(int reg) const {
+  ASSERT((reg >= 0) && (reg < kNumSimuRegisters));
+  if (reg == 0)
+    return 0;
+  else
+    return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0);
+}
+
+int32_t Simulator::get_fpu_register(int fpureg) const {
+  ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+  return FPUregisters_[fpureg];
+}
+
+double Simulator::get_fpu_register_double(int fpureg) const {
+  ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
+  return *v8i::bit_cast<double*, int32_t*>(
+      const_cast<int32_t*>(&FPUregisters_[fpureg]));
+}
+
+// Raw access to the PC register.
+void Simulator::set_pc(int32_t value) {
+  pc_modified_ = true;
+  registers_[pc] = value;
+}
+
+// Raw access to the PC register without the special adjustment when reading.
+int32_t Simulator::get_pc() const {
+  return registers_[pc];
+}
+
+
+// The MIPS cannot do unaligned reads and writes.  On some MIPS platforms an
+// interrupt is caused.  On others it does a funky rotation thing.  For now we
+// simply disallow unaligned reads, but at some point we may want to move to
+// emulating the rotate behaviour.  Note that simulator runs have the runtime
+// system running directly on the host system and only generated code is
+// executed in the simulator.  Since the host is typically IA32 we will not
+// get the correct MIPS-like behaviour on unaligned accesses.
+
+int Simulator::ReadW(int32_t addr, Instruction* instr) {
+  if ((addr & v8i::kPointerAlignmentMask) == 0) {
+    intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+    return *ptr;
+  }
+  PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
+  OS::Abort();
+  return 0;
+}
+
+
+void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
+  if ((addr & v8i::kPointerAlignmentMask) == 0) {
+    intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+    *ptr = value;
+    return;
+  }
+  PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
+  OS::Abort();
+}
+
+
+double Simulator::ReadD(int32_t addr, Instruction* instr) {
+  if ((addr & kDoubleAlignmentMask) == 0) {
+    double* ptr = reinterpret_cast<double*>(addr);
+    return *ptr;
+  }
+  PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
+  OS::Abort();
+  return 0;
+}
+
+
+void Simulator::WriteD(int32_t addr, double value, Instruction* instr) {
+  if ((addr & kDoubleAlignmentMask) == 0) {
+    double* ptr = reinterpret_cast<double*>(addr);
+    *ptr = value;
+    return;
+  }
+  PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
+  OS::Abort();
+}
+
+
+uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
+  if ((addr & 1) == 0) {
+    uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+    return *ptr;
+  }
+  PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr, instr);
+  OS::Abort();
+  return 0;
+}
+
+
+int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
+  if ((addr & 1) == 0) {
+    int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+    return *ptr;
+  }
+  PrintF("Unaligned signed halfword read at 0x%08x, pc=%p\n", addr, instr);
+  OS::Abort();
+  return 0;
+}
+
+
+void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
+  if ((addr & 1) == 0) {
+    uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+    *ptr = value;
+    return;
+  }
+  PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr, instr);
+  OS::Abort();
+}
+
+
+void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
+  if ((addr & 1) == 0) {
+    int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+    *ptr = value;
+    return;
+  }
+  PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr, instr);
+  OS::Abort();
+}
+
+
+uint32_t Simulator::ReadBU(int32_t addr) {
+  uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+  return *ptr & 0xff;
+}
+
+
+int32_t Simulator::ReadB(int32_t addr) {
+  int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+  return ((*ptr << 24) >> 24) & 0xff;
+}
+
+
+void Simulator::WriteB(int32_t addr, uint8_t value) {
+  uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+  *ptr = value;
+}
+
+
+void Simulator::WriteB(int32_t addr, int8_t value) {
+  int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+  *ptr = value;
+}
+
+
+// Returns the limit of the stack area to enable checking for stack overflows.
+uintptr_t Simulator::StackLimit() const {
+  // Leave a safety margin of 256 bytes to prevent overrunning the stack when
+  // pushing values.
+  return reinterpret_cast<uintptr_t>(stack_) + 256;
+}
+
+
+// Unsupported instructions use Format to print an error and stop execution.
+void Simulator::Format(Instruction* instr, const char* format) {
+  PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
+         instr, format);
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// Calls into the V8 runtime are based on this very simple interface.
+// Note: To be able to return two values from some calls the code in runtime.cc
+// uses the ObjectPair which is essentially two 32-bit values stuffed into a
+// 64-bit value. With the code below we assume that all runtime calls return
+// 64 bits of result. If they don't, the v1 result register contains a bogus
+// value, which is fine because it is caller-saved.
+typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
+                                        int32_t arg1,
+                                        int32_t arg2,
+                                        int32_t arg3);
+typedef double (*SimulatorRuntimeFPCall)(double fparg0,
+                                         double fparg1);
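+
+// Worked example (illustrative only): a runtime call returning the pair
+// {lo = 0x00000001, hi = 0xdeadbeef} packs it as
+//
+//   int64_t packed =
+//       (static_cast<int64_t>(0xdeadbeef) << 32) | 0x00000001;
+//
+// and the unpacking in SoftwareInterrupt() below puts 0x00000001 in v0 and
+// 0xdeadbeef in v1.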
+
+
+// Software interrupt instructions are used by the simulator to call into the
+// C-based V8 runtime.
+void Simulator::SoftwareInterrupt(Instruction* instr) {
+  // First, check whether we hit a redirected runtime call (call_rt_redirected).
+  if (instr->InstructionBits() == rtCallRedirInstr) {
+    Redirection* redirection = Redirection::FromSwiInstruction(instr);
+    int32_t arg0 = get_register(a0);
+    int32_t arg1 = get_register(a1);
+    int32_t arg2 = get_register(a2);
+    int32_t arg3 = get_register(a3);
+    // FP arguments are usually passed in f12 and f14, but not always;
+    // see the MIPS calling conventions for more details.
+    double fparg0 = get_fpu_register_double(f12);
+    double fparg1 = get_fpu_register_double(f14);
+    // This is dodgy but it works because the C entry stubs are never moved.
+    // See comment in codegen-arm.cc and bug 1242173.
+    int32_t saved_ra = get_register(ra);
+    if (redirection->fp_return()) {
+      intptr_t external =
+          reinterpret_cast<intptr_t>(redirection->external_function());
+      SimulatorRuntimeFPCall target =
+          reinterpret_cast<SimulatorRuntimeFPCall>(external);
+      if (::v8::internal::FLAG_trace_sim) {
+        PrintF("Call to host function at %p with args %f, %f\n",
+               FUNCTION_ADDR(target), fparg0, fparg1);
+      }
+      double result = target(fparg0, fparg1);
+      set_fpu_register_double(f0, result);
+    } else {
+      intptr_t external =
+          reinterpret_cast<int32_t>(redirection->external_function());
+      SimulatorRuntimeCall target =
+          reinterpret_cast<SimulatorRuntimeCall>(external);
+      if (::v8::internal::FLAG_trace_sim) {
+        PrintF(
+            "Call to host function at %p with args %08x, %08x, %08x, %08x\n",
+            FUNCTION_ADDR(target),
+            arg0,
+            arg1,
+            arg2,
+            arg3);
+      }
+      int64_t result = target(arg0, arg1, arg2, arg3);
+      int32_t lo_res = static_cast<int32_t>(result);
+      int32_t hi_res = static_cast<int32_t>(result >> 32);
+      if (::v8::internal::FLAG_trace_sim) {
+        PrintF("Returned %08x\n", lo_res);
+      }
+      set_register(v0, lo_res);
+      set_register(v1, hi_res);
+    }
+    set_register(ra, saved_ra);
+    set_pc(get_register(ra));
+  } else {
+    Debugger dbg(this);
+    dbg.Debug();
+  }
+}
+
+void Simulator::SignalExceptions() {
+  for (int i = 1; i < kNumExceptions; i++) {
+    if (exceptions[i] != 0) {
+      V8_Fatal(__FILE__, __LINE__, "Error: Exception %i raised.", i);
+    }
+  }
+}
+
+// Handle execution based on instruction types.
+void Simulator::DecodeTypeRegister(Instruction* instr) {
+  // Instruction fields
+  Opcode   op     = instr->OpcodeFieldRaw();
+  int32_t  rs_reg = instr->RsField();
+  int32_t  rs     = get_register(rs_reg);
+  uint32_t rs_u   = static_cast<uint32_t>(rs);
+  int32_t  rt_reg = instr->RtField();
+  int32_t  rt     = get_register(rt_reg);
+  uint32_t rt_u   = static_cast<uint32_t>(rt);
+  int32_t  rd_reg = instr->RdField();
+  uint32_t sa     = instr->SaField();
+
+  int32_t fs_reg= instr->FsField();
+
+  // ALU output
+  // It should not be used as is. Instructions using it should always initialize
+  // it first.
+  int32_t alu_out = 0x12345678;
+  // Output or temporary for floating point.
+  double fp_out = 0.0;
+
+  // For break and trap instructions.
+  bool do_interrupt = false;
+
+  // For jr and jalr
+  // Get current pc.
+  int32_t current_pc = get_pc();
+  // Next pc
+  int32_t next_pc = 0;
+
+  // ---------- Configuration
+  switch (op) {
+    case COP1:    // Coprocessor instructions
+      switch (instr->RsFieldRaw()) {
+        case BC1:   // branch on coprocessor condition
+          UNREACHABLE();
+          break;
+        case MFC1:
+          alu_out = get_fpu_register(fs_reg);
+          break;
+        case MFHC1:
+          fp_out = get_fpu_register_double(fs_reg);
+          alu_out = *v8i::bit_cast<int32_t*, double*>(&fp_out);
+          break;
+        case MTC1:
+        case MTHC1:
+          // Do the store in the execution step.
+          break;
+        case S:
+        case D:
+        case W:
+        case L:
+        case PS:
+          // Do everything in the execution step.
+          break;
+        default:
+          UNIMPLEMENTED_MIPS();
+      };
+      break;
+    case SPECIAL:
+      switch (instr->FunctionFieldRaw()) {
+        case JR:
+        case JALR:
+          next_pc = get_register(instr->RsField());
+          break;
+        case SLL:
+          alu_out = rt << sa;
+          break;
+        case SRL:
+          alu_out = rt_u >> sa;
+          break;
+        case SRA:
+          alu_out = rt >> sa;
+          break;
+        case SLLV:
+          alu_out = rt << rs;
+          break;
+        case SRLV:
+          alu_out = rt_u >> rs;
+          break;
+        case SRAV:
+          alu_out = rt >> rs;
+          break;
+        case MFHI:
+          alu_out = get_register(HI);
+          break;
+        case MFLO:
+          alu_out = get_register(LO);
+          break;
+        case MULT:
+          UNIMPLEMENTED_MIPS();
+          break;
+        case MULTU:
+          UNIMPLEMENTED_MIPS();
+          break;
+        case DIV:
+        case DIVU:
+          exceptions[kDivideByZero] = rt == 0;
+          break;
+        case ADD:
+          if (HaveSameSign(rs, rt)) {
+            if (rs > 0) {
+              exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue - rt);
+            } else if (rs < 0) {
+              exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue - rt);
+            }
+          }
+          alu_out = rs + rt;
+          break;
+        case ADDU:
+          alu_out = rs + rt;
+          break;
+        case SUB:
+          if (!HaveSameSign(rs, rt)) {
+            if (rs > 0) {
+              exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue + rt);
+            } else if (rs < 0) {
+              exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue + rt);
+            }
+          }
+          alu_out = rs - rt;
+          break;
+        case SUBU:
+          alu_out = rs - rt;
+          break;
+        case AND:
+          alu_out = rs & rt;
+          break;
+        case OR:
+          alu_out = rs | rt;
+          break;
+        case XOR:
+          alu_out = rs ^ rt;
+          break;
+        case NOR:
+          alu_out = ~(rs | rt);
+          break;
+        case SLT:
+          alu_out = rs < rt ? 1 : 0;
+          break;
+        case SLTU:
+          alu_out = rs_u < rt_u ? 1 : 0;
+          break;
+        // Break and trap instructions
+        case BREAK:
+          do_interrupt = true;
+          break;
+        case TGE:
+          do_interrupt = rs >= rt;
+          break;
+        case TGEU:
+          do_interrupt = rs_u >= rt_u;
+          break;
+        case TLT:
+          do_interrupt = rs < rt;
+          break;
+        case TLTU:
+          do_interrupt = rs_u < rt_u;
+          break;
+        case TEQ:
+          do_interrupt = rs == rt;
+          break;
+        case TNE:
+          do_interrupt = rs != rt;
+          break;
+        default:
+          UNREACHABLE();
+      };
+      break;
+    case SPECIAL2:
+      switch (instr->FunctionFieldRaw()) {
+        case MUL:
+          alu_out = rs_u * rt_u;  // Only the lower 32 bits are kept.
+          break;
+        default:
+          UNREACHABLE();
+      }
+      break;
+    default:
+      UNREACHABLE();
+  };
+
+  // ---------- Raise exceptions triggered.
+  SignalExceptions();
+
+  // ---------- Execution
+  switch (op) {
+    case COP1:
+      switch (instr->RsFieldRaw()) {
+        case BC1:   // branch on coprocessor condition
+          UNREACHABLE();
+          break;
+        case MFC1:
+        case MFHC1:
+          set_register(rt_reg, alu_out);
+          break;
+        case MTC1:
+          // We don't need to set the higher bits to 0, because MIPS ISA says
+          // they are in an unpredictable state after executing MTC1.
+          FPUregisters_[fs_reg] = registers_[rt_reg];
+          FPUregisters_[fs_reg+1] = Unpredictable;
+          break;
+        case MTHC1:
+          // Here we need to keep the lower bits unchanged.
+          FPUregisters_[fs_reg+1] = registers_[rt_reg];
+          break;
+        case S:
+          switch (instr->FunctionFieldRaw()) {
+            case CVT_D_S:
+            case CVT_W_S:
+            case CVT_L_S:
+            case CVT_PS_S:
+              UNIMPLEMENTED_MIPS();
+              break;
+            default:
+              UNREACHABLE();
+          }
+          break;
+        case D:
+          switch (instr->FunctionFieldRaw()) {
+            case CVT_S_D:
+            case CVT_W_D:
+            case CVT_L_D:
+              UNIMPLEMENTED_MIPS();
+              break;
+            default:
+              UNREACHABLE();
+          }
+          break;
+        case W:
+          switch (instr->FunctionFieldRaw()) {
+            case CVT_S_W:
+              UNIMPLEMENTED_MIPS();
+              break;
+            case CVT_D_W:   // Convert word to double.
+              set_fpu_register(rd_reg, static_cast<double>(rs));
+              break;
+            default:
+              UNREACHABLE();
+          };
+          break;
+        case L:
+          switch (instr->FunctionFieldRaw()) {
+            case CVT_S_L:
+            case CVT_D_L:
+              UNIMPLEMENTED_MIPS();
+              break;
+            default:
+              UNREACHABLE();
+          }
+          break;
+        case PS:
+          break;
+        default:
+          UNREACHABLE();
+      };
+      break;
+    case SPECIAL:
+      switch (instr->FunctionFieldRaw()) {
+        case JR: {
+          Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
+              current_pc+Instruction::kInstructionSize);
+          BranchDelayInstructionDecode(branch_delay_instr);
+          set_pc(next_pc);
+          pc_modified_ = true;
+          break;
+        }
+        case JALR: {
+          Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
+              current_pc+Instruction::kInstructionSize);
+          BranchDelayInstructionDecode(branch_delay_instr);
+          set_register(31, current_pc + 2* Instruction::kInstructionSize);
+          set_pc(next_pc);
+          pc_modified_ = true;
+          break;
+        }
+        // Instructions using HI and LO registers.
+        case MULT:
+        case MULTU:
+          break;
+        case DIV:
+          // Divide by zero was checked in the configuration step.
+          set_register(LO, rs / rt);
+          set_register(HI, rs % rt);
+          break;
+        case DIVU:
+          set_register(LO, rs_u / rt_u);
+          set_register(HI, rs_u % rt_u);
+          break;
+        // Break and trap instructions
+        case BREAK:
+        case TGE:
+        case TGEU:
+        case TLT:
+        case TLTU:
+        case TEQ:
+        case TNE:
+          if (do_interrupt) {
+            SoftwareInterrupt(instr);
+          }
+          break;
+        default:  // For other special opcodes we do the default operation.
+          set_register(rd_reg, alu_out);
+      };
+      break;
+    case SPECIAL2:
+      switch (instr->FunctionFieldRaw()) {
+        case MUL:
+          set_register(rd_reg, alu_out);
+          // HI and LO are UNPREDICTABLE after the operation.
+          set_register(LO, Unpredictable);
+          set_register(HI, Unpredictable);
+          break;
+        default:
+          UNREACHABLE();
+      }
+      break;
+    // Unimplemented opcodes raised an error in the configuration step before,
+    // so we can use the default here to set the destination register in common
+    // cases.
+    default:
+      set_register(rd_reg, alu_out);
+  };
+}
+
+// Type 2: instructions using a 16-bit immediate (e.g. addi, beq).
+void Simulator::DecodeTypeImmediate(Instruction* instr) {
+  // Instruction fields
+  Opcode   op     = instr->OpcodeFieldRaw();
+  int32_t  rs     = get_register(instr->RsField());
+  uint32_t rs_u   = static_cast<uint32_t>(rs);
+  int32_t  rt_reg = instr->RtField();  // destination register
+  int32_t  rt     = get_register(rt_reg);
+  int16_t  imm16  = instr->Imm16Field();
+
+  int32_t  ft_reg = instr->FtField();  // destination register
+  int32_t  ft     = get_register(ft_reg);
+
+  // zero extended immediate
+  uint32_t  oe_imm16 = 0xffff & imm16;
+  // sign extended immediate
+  int32_t   se_imm16 = imm16;
+
+  // Get current pc.
+  int32_t current_pc = get_pc();
+  // Next pc.
+  int32_t next_pc = bad_ra;
+
+  // Used for conditional branch instructions
+  bool do_branch = false;
+  bool execute_branch_delay_instruction = false;
+
+  // Used for arithmetic instructions
+  int32_t alu_out = 0;
+  // Floating point
+  double fp_out = 0.0;
+
+  // Used for memory instructions
+  int32_t addr = 0x0;
+
+  // ---------- Configuration (and execution for REGIMM)
+  switch (op) {
+    // ------------- COP1. Coprocessor instructions
+    case COP1:
+      switch (instr->RsFieldRaw()) {
+        case BC1:   // branch on coprocessor condition
+          UNIMPLEMENTED_MIPS();
+          break;
+        default:
+          UNREACHABLE();
+      };
+      break;
+    // ------------- REGIMM class
+    case REGIMM:
+      switch (instr->RtFieldRaw()) {
+        case BLTZ:
+          do_branch = (rs  < 0);
+          break;
+        case BLTZAL:
+          do_branch = rs  < 0;
+          break;
+        case BGEZ:
+          do_branch = rs >= 0;
+          break;
+        case BGEZAL:
+          do_branch = rs >= 0;
+          break;
+        default:
+          UNREACHABLE();
+      };
+      switch (instr->RtFieldRaw()) {
+        case BLTZ:
+        case BLTZAL:
+        case BGEZ:
+        case BGEZAL:
+          // Branch instructions common part.
+          execute_branch_delay_instruction = true;
+          // Set next_pc
+          if (do_branch) {
+            next_pc = current_pc + (imm16 << 2) + Instruction::kInstructionSize;
+            if (instr->IsLinkingInstruction()) {
+              set_register(31, current_pc + kBranchReturnOffset);
+            }
+          } else {
+            next_pc = current_pc + kBranchReturnOffset;
+          }
+        default:
+          break;
+      };
+      break;  // case REGIMM
+    // ------------- Branch instructions
+    // When comparing to zero, the encoding of rt field is always 0, so we don't
+    // need to replace rt with zero.
+    case BEQ:
+      do_branch = (rs == rt);
+      break;
+    case BNE:
+      do_branch = rs != rt;
+      break;
+    case BLEZ:
+      do_branch = rs <= 0;
+      break;
+    case BGTZ:
+      do_branch = rs  > 0;
+      break;
+    // ------------- Arithmetic instructions
+    case ADDI:
+      if (HaveSameSign(rs, se_imm16)) {
+        if (rs > 0) {
+          exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue - se_imm16);
+        } else if (rs < 0) {
+          exceptions[kIntegerUnderflow] =
+              rs < (Registers::kMinValue - se_imm16);
+        }
+      }
+      alu_out = rs + se_imm16;
+      break;
+    case ADDIU:
+      alu_out = rs + se_imm16;
+      break;
+    case SLTI:
+      alu_out = (rs < se_imm16) ? 1 : 0;
+      break;
+    case SLTIU:
+      alu_out = (rs_u < static_cast<uint32_t>(se_imm16)) ? 1 : 0;
+      break;
+    case ANDI:
+      alu_out = rs & oe_imm16;
+      break;
+    case ORI:
+      alu_out = rs | oe_imm16;
+      break;
+    case XORI:
+      alu_out = rs ^ oe_imm16;
+      break;
+    case LUI:
+      alu_out = (oe_imm16 << 16);
+      break;
+    // ------------- Memory instructions
+    case LB:
+      addr = rs + se_imm16;
+      alu_out = ReadB(addr);
+      break;
+    case LW:
+      addr = rs + se_imm16;
+      alu_out = ReadW(addr, instr);
+      break;
+    case LBU:
+      addr = rs + se_imm16;
+      alu_out = ReadBU(addr);
+      break;
+    case SB:
+      addr = rs + se_imm16;
+      break;
+    case SW:
+      addr = rs + se_imm16;
+      break;
+    case LWC1:
+      addr = rs + se_imm16;
+      alu_out = ReadW(addr, instr);
+      break;
+    case LDC1:
+      addr = rs + se_imm16;
+      fp_out = ReadD(addr, instr);
+      break;
+    case SWC1:
+    case SDC1:
+      addr = rs + se_imm16;
+      break;
+    default:
+      UNREACHABLE();
+  };
+
+  // ---------- Raise exceptions triggered.
+  SignalExceptions();
+
+  // ---------- Execution
+  switch (op) {
+    // ------------- Branch instructions
+    case BEQ:
+    case BNE:
+    case BLEZ:
+    case BGTZ:
+      // Branch instructions common part.
+      execute_branch_delay_instruction = true;
+      // Set next_pc
+      if (do_branch) {
+        next_pc = current_pc + (imm16 << 2) + Instruction::kInstructionSize;
+        if (instr->IsLinkingInstruction()) {
+          set_register(31, current_pc + 2* Instruction::kInstructionSize);
+        }
+      } else {
+        next_pc = current_pc + 2 * Instruction::kInstructionSize;
+      }
+      break;
+    // ------------- Arithmetic instructions
+    case ADDI:
+    case ADDIU:
+    case SLTI:
+    case SLTIU:
+    case ANDI:
+    case ORI:
+    case XORI:
+    case LUI:
+      set_register(rt_reg, alu_out);
+      break;
+    // ------------- Memory instructions
+    case LB:
+    case LW:
+    case LBU:
+      set_register(rt_reg, alu_out);
+      break;
+    case SB:
+      WriteB(addr, static_cast<int8_t>(rt));
+      break;
+    case SW:
+      WriteW(addr, rt, instr);
+      break;
+    case LWC1:
+      set_fpu_register(ft_reg, alu_out);
+      break;
+    case LDC1:
+      set_fpu_register_double(ft_reg, fp_out);
+      break;
+    case SWC1:
+      addr = rs + se_imm16;
+      WriteW(addr, get_fpu_register(ft_reg), instr);
+      break;
+    case SDC1:
+      addr = rs + se_imm16;
+      WriteD(addr, ft, instr);
+      break;
+    default:
+      break;
+  };
+
+
+  if (execute_branch_delay_instruction) {
+    // Execute the branch delay slot.
+    // We don't check for end_sim_pc. First, it should not be hit, as the
+    // current pc is valid. Second, a jump should always execute its branch
+    // delay slot.
+    Instruction* branch_delay_instr =
+      reinterpret_cast<Instruction*>(current_pc+Instruction::kInstructionSize);
+    BranchDelayInstructionDecode(branch_delay_instr);
+  }
+
+  // If needed update pc after the branch delay execution.
+  if (next_pc != bad_ra) {
+    set_pc(next_pc);
+  }
+}
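+
+// Worked example (illustrative, not part of this patch): a taken BEQ at
+// current_pc = 0x00400010 with imm16 = 3 computes
+//   next_pc = 0x00400010 + (3 << 2) + 4 = 0x00400020,
+// i.e. the offset is scaled by the instruction size and measured from the
+// delay-slot instruction, while a not-taken branch just skips the delay slot:
+//   next_pc = 0x00400010 + 2 * 4 = 0x00400018.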
+
+// Type 3: instructions using a 26-bit immediate (e.g. j, jal).
+void Simulator::DecodeTypeJump(Instruction* instr) {
+  // Get current pc.
+  int32_t current_pc = get_pc();
+  // Get unchanged bits of pc.
+  int32_t pc_high_bits = current_pc & 0xf0000000;
+  // Next pc
+  int32_t next_pc = pc_high_bits | (instr->Imm26Field() << 2);
+
+  // Execute the branch delay slot.
+  // We don't check for end_sim_pc. First, it should not be hit, as the current
+  // pc is valid. Second, a jump should always execute its branch delay slot.
+  Instruction* branch_delay_instr =
+    reinterpret_cast<Instruction*>(current_pc+Instruction::kInstructionSize);
+  BranchDelayInstructionDecode(branch_delay_instr);
+
+  // Update pc and ra if necessary.
+  // Do this after the branch delay execution.
+  if (instr->IsLinkingInstruction()) {
+    set_register(31, current_pc + 2* Instruction::kInstructionSize);
+  }
+  set_pc(next_pc);
+  pc_modified_ = true;
+}
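+
+// Worked example (illustrative only): a J instruction at current_pc =
+// 0x00400018 with Imm26Field() = 0x0100000 produces
+//   next_pc = (0x00400018 & 0xf0000000) | (0x0100000 << 2) = 0x00400000,
+// so the upper four bits of the current pc are preserved and only the low
+// 28 bits are replaced.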
+
+// Executes the current instruction.
+void Simulator::InstructionDecode(Instruction* instr) {
+  pc_modified_ = false;
+  if (::v8::internal::FLAG_trace_sim) {
+    disasm::NameConverter converter;
+    disasm::Disassembler dasm(converter);
+    // use a reasonably large buffer
+    v8::internal::EmbeddedVector<char, 256> buffer;
+    dasm.InstructionDecode(buffer,
+                           reinterpret_cast<byte_*>(instr));
+    PrintF("  0x%08x  %s\n", instr, buffer.start());
+  }
+
+  switch (instr->InstructionType()) {
+    case Instruction::kRegisterType:
+      DecodeTypeRegister(instr);
+      break;
+    case Instruction::kImmediateType:
+      DecodeTypeImmediate(instr);
+      break;
+    case Instruction::kJumpType:
+      DecodeTypeJump(instr);
+      break;
+    default:
+      UNSUPPORTED();
+  }
+  if (!pc_modified_) {
+    set_register(pc, reinterpret_cast<int32_t>(instr) +
+                 Instruction::kInstructionSize);
+  }
+}
+
+
+
+void Simulator::Execute() {
+  // Get the PC to simulate. Cannot use the accessor here as we need the
+  // raw PC value and not the one used as input to arithmetic instructions.
+  int program_counter = get_pc();
+  if (::v8::internal::FLAG_stop_sim_at == 0) {
+    // Fast version of the dispatch loop without checking whether the simulator
+    // should be stopping at a particular executed instruction.
+    while (program_counter != end_sim_pc) {
+      Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+      icount_++;
+      InstructionDecode(instr);
+      program_counter = get_pc();
+    }
+  } else {
+    // FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
+    // we reach the particular instruction count.
+    while (program_counter != end_sim_pc) {
+      Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+      icount_++;
+      if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
+        Debugger dbg(this);
+        dbg.Debug();
+      } else {
+        InstructionDecode(instr);
+      }
+      program_counter = get_pc();
+    }
+  }
+}
+
+
+int32_t Simulator::Call(byte_* entry, int argument_count, ...) {
+  va_list parameters;
+  va_start(parameters, argument_count);
+  // Setup arguments
+
+  // First four arguments passed in registers.
+  ASSERT(argument_count >= 4);
+  set_register(a0, va_arg(parameters, int32_t));
+  set_register(a1, va_arg(parameters, int32_t));
+  set_register(a2, va_arg(parameters, int32_t));
+  set_register(a3, va_arg(parameters, int32_t));
+
+  // Remaining arguments passed on stack.
+  int original_stack = get_register(sp);
+  // Compute position of stack on entry to generated code.
+  int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)
+                                    - kArgsSlotsSize);
+  if (OS::ActivationFrameAlignment() != 0) {
+    entry_stack &= -OS::ActivationFrameAlignment();
+  }
+  // Store remaining arguments on stack, from low to high memory.
+  intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+  for (int i = 4; i < argument_count; i++) {
+    stack_argument[i - 4 + kArgsSlotsNum] = va_arg(parameters, int32_t);
+  }
+  va_end(parameters);
+  set_register(sp, entry_stack);
+
+  // Prepare to execute the code at entry
+  set_register(pc, reinterpret_cast<int32_t>(entry));
+  // Put down marker for end of simulation. The simulator will stop simulation
+  // when the PC reaches this value. By saving the "end simulation" value into
+  // ra, the simulation stops when returning to this call point.
+  set_register(ra, end_sim_pc);
+
+  // Remember the values of the callee-saved registers (s0-s7, gp, sp and fp)
+  // so that they can be checked and restored after the call.
+  int32_t s0_val = get_register(s0);
+  int32_t s1_val = get_register(s1);
+  int32_t s2_val = get_register(s2);
+  int32_t s3_val = get_register(s3);
+  int32_t s4_val = get_register(s4);
+  int32_t s5_val = get_register(s5);
+  int32_t s6_val = get_register(s6);
+  int32_t s7_val = get_register(s7);
+  int32_t gp_val = get_register(gp);
+  int32_t sp_val = get_register(sp);
+  int32_t fp_val = get_register(fp);
+
+  // Setup the callee-saved registers with a known value. To be able to check
+  // that they are preserved properly across JS execution.
+  int32_t callee_saved_value = icount_;
+  set_register(s0, callee_saved_value);
+  set_register(s1, callee_saved_value);
+  set_register(s2, callee_saved_value);
+  set_register(s3, callee_saved_value);
+  set_register(s4, callee_saved_value);
+  set_register(s5, callee_saved_value);
+  set_register(s6, callee_saved_value);
+  set_register(s7, callee_saved_value);
+  set_register(gp, callee_saved_value);
+  set_register(fp, callee_saved_value);
+
+  // Start the simulation
+  Execute();
+
+  // Check that the callee-saved registers have been preserved.
+  CHECK_EQ(callee_saved_value, get_register(s0));
+  CHECK_EQ(callee_saved_value, get_register(s1));
+  CHECK_EQ(callee_saved_value, get_register(s2));
+  CHECK_EQ(callee_saved_value, get_register(s3));
+  CHECK_EQ(callee_saved_value, get_register(s4));
+  CHECK_EQ(callee_saved_value, get_register(s5));
+  CHECK_EQ(callee_saved_value, get_register(s6));
+  CHECK_EQ(callee_saved_value, get_register(s7));
+  CHECK_EQ(callee_saved_value, get_register(gp));
+  CHECK_EQ(callee_saved_value, get_register(fp));
+
+  // Restore callee-saved registers with the original value.
+  set_register(s0, s0_val);
+  set_register(s1, s1_val);
+  set_register(s2, s2_val);
+  set_register(s3, s3_val);
+  set_register(s4, s4_val);
+  set_register(s5, s5_val);
+  set_register(s6, s6_val);
+  set_register(s7, s7_val);
+  set_register(gp, gp_val);
+  set_register(sp, sp_val);
+  set_register(fp, fp_val);
+
+  // Pop stack passed arguments.
+  CHECK_EQ(entry_stack, get_register(sp));
+  set_register(sp, original_stack);
+
+  int32_t result = get_register(v0);
+  return result;
+}
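+
+// Usage sketch (illustrative only; the real call sites are the
+// CALL_GENERATED_CODE and CALL_GENERATED_REGEXP_CODE macros in
+// simulator-mips.h). Here 'code_entry' is a hypothetical byte_* pointing at
+// generated MIPS code that takes five int32-sized arguments:
+//
+//   int32_t result = assembler::mips::Simulator::current()->Call(
+//       code_entry, 5, p0, p1, p2, p3, p4);
+//
+// The first four arguments travel in a0-a3, any further arguments are written
+// to the stack above the reserved argument slots, and the return value is
+// read back from v0.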
+
+
+uintptr_t Simulator::PushAddress(uintptr_t address) {
+  int new_sp = get_register(sp) - sizeof(uintptr_t);
+  uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+  *stack_slot = address;
+  set_register(sp, new_sp);
+  return new_sp;
+}
+
+
+uintptr_t Simulator::PopAddress() {
+  int current_sp = get_register(sp);
+  uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+  uintptr_t address = *stack_slot;
+  set_register(sp, current_sp + sizeof(uintptr_t));
+  return address;
+}
+
+
+#undef UNSUPPORTED
+
+} }  // namespace assembler::mips
+
+#endif  // !defined(__mips)
+
diff --git a/src/mips/simulator-mips.h b/src/mips/simulator-mips.h
new file mode 100644
index 0000000..d5dfc30
--- /dev/null
+++ b/src/mips/simulator-mips.h
@@ -0,0 +1,311 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Declares a Simulator for MIPS instructions if we are not generating a native
+// MIPS binary. This Simulator allows us to run and debug MIPS code generation
+// on regular desktop machines.
+// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
+// which will either start execution in the Simulator or forward to the real
+// entry point on a MIPS HW platform.
+
+#ifndef V8_MIPS_SIMULATOR_MIPS_H_
+#define V8_MIPS_SIMULATOR_MIPS_H_
+
+#include "allocation.h"
+
+#if defined(__mips)
+
+// When running without a simulator we call the entry directly.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+  entry(p0, p1, p2, p3, p4);
+
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on MIPS uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+    return c_limit;
+  }
+
+  static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+    return try_catch_address;
+  }
+
+  static inline void UnregisterCTryCatch() { }
+};
+
+// Calculates the stack limit beyond which we will throw stack overflow errors.
+// This macro must be called from a C++ method. It relies on being able to take
+// the address of "this" to get a value on the current execution stack and then
+// calculates the stack limit based on that value.
+// NOTE: The check for overflow is not safe as there is no guarantee that the
+// running thread has its stack in all memory up to address 0x00000000.
+#define GENERATED_CODE_STACK_LIMIT(limit) \
+  (reinterpret_cast<uintptr_t>(this) >= limit ? \
+      reinterpret_cast<uintptr_t>(this) - limit : 0)
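+
+// Worked example (illustrative only): with this == 0x7fff0000 and
+// limit == 0x00010000 the macro evaluates to
+//   0x7fff0000 - 0x00010000 = 0x7ffe0000,
+// i.e. generated code may grow the stack roughly 'limit' bytes below the
+// current C++ frame before the stack-overflow check triggers.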
+
+// Call the generated regexp code directly. The entry function pointer should
+// expect seven int/pointer sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+  entry(p0, p1, p2, p3, p4, p5, p6)
+
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+  reinterpret_cast<TryCatch*>(try_catch_address)
+
+
+#else   // #if defined(__mips)
+
+// When running with the simulator transition into simulated execution at this
+// point.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+  reinterpret_cast<Object*>(\
+      assembler::mips::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
+                                                  p0, p1, p2, p3, p4))
+
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+  assembler::mips::Simulator::current()->Call(\
+    FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
+
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+  try_catch_address == NULL ? \
+      NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
+
+
+namespace assembler {
+namespace mips {
+
+class Simulator {
+ public:
+  friend class Debugger;
+
+  // Registers are declared in order. See SMRL chapter 2.
+  enum Register {
+    no_reg = -1,
+    zero_reg = 0,
+    at,
+    v0, v1,
+    a0, a1, a2, a3,
+    t0, t1, t2, t3, t4, t5, t6, t7,
+    s0, s1, s2, s3, s4, s5, s6, s7,
+    t8, t9,
+    k0, k1,
+    gp,
+    sp,
+    s8,
+    ra,
+    // LO, HI, and pc
+    LO,
+    HI,
+    pc,   // pc must be the last register.
+    kNumSimuRegisters,
+    // aliases
+    fp = s8
+  };
+
+  // Coprocessor registers.
+  // Generated code will always use doubles, so we will only use even registers.
+  enum FPURegister {
+    f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
+    f12, f13, f14, f15,   // f12 and f14 are argument FPU registers.
+    f16, f17, f18, f19, f20, f21, f22, f23, f24, f25,
+    f26, f27, f28, f29, f30, f31,
+    kNumFPURegisters
+  };
+
+  Simulator();
+  ~Simulator();
+
+  // The currently executing Simulator instance. Potentially there can be one
+  // for each native thread.
+  static Simulator* current();
+
+  // Accessors for register state. Reading the pc value adheres to the MIPS
+  // architecture specification and is off by 8 from the currently executing
+  // instruction.
+  void set_register(int reg, int32_t value);
+  int32_t get_register(int reg) const;
+  // Same for FPURegisters
+  void set_fpu_register(int fpureg, int32_t value);
+  void set_fpu_register_double(int fpureg, double value);
+  int32_t get_fpu_register(int fpureg) const;
+  double get_fpu_register_double(int fpureg) const;
+
+  // Special case of set_register and get_register to access the raw PC value.
+  void set_pc(int32_t value);
+  int32_t get_pc() const;
+
+  // Accessor to the internal simulator stack area.
+  uintptr_t StackLimit() const;
+
+  // Executes MIPS instructions until the PC reaches end_sim_pc.
+  void Execute();
+
+  // Call on program start.
+  static void Initialize();
+
+  // V8 generally calls into generated JS code with 5 parameters and into
+  // generated RegExp code with 7 parameters. This is a convenience function,
+  // which sets up the simulator state and grabs the result on return.
+  int32_t Call(byte_* entry, int argument_count, ...);
+
+  // Push an address onto the JS stack.
+  uintptr_t PushAddress(uintptr_t address);
+
+  // Pop an address from the JS stack.
+  uintptr_t PopAddress();
+
+ private:
+  enum special_values {
+    // Known bad pc value to ensure that the simulator does not execute
+    // without being properly set up.
+    bad_ra = -1,
+    // A pc value used to signal the simulator to stop execution.  Generally
+    // the ra is set to this value on transition from native C code to
+    // simulated execution, so that the simulator can "return" to the native
+    // C code.
+    end_sim_pc = -2,
+    // Unpredictable value.
+    Unpredictable = 0xbadbeaf
+  };
+
+  // Unsupported instructions use Format to print an error and stop execution.
+  void Format(Instruction* instr, const char* format);
+
+  // Read and write memory.
+  inline uint32_t ReadBU(int32_t addr);
+  inline int32_t ReadB(int32_t addr);
+  inline void WriteB(int32_t addr, uint8_t value);
+  inline void WriteB(int32_t addr, int8_t value);
+
+  inline uint16_t ReadHU(int32_t addr, Instruction* instr);
+  inline int16_t ReadH(int32_t addr, Instruction* instr);
+  // Note: Overloaded on the sign of the value.
+  inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
+  inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
+
+  inline int ReadW(int32_t addr, Instruction* instr);
+  inline void WriteW(int32_t addr, int value, Instruction* instr);
+
+  inline double ReadD(int32_t addr, Instruction* instr);
+  inline void WriteD(int32_t addr, double value, Instruction* instr);
+
+  // Operations depending on endianness.
+  // Get Double Higher / Lower word.
+  inline int32_t GetDoubleHIW(double* addr);
+  inline int32_t GetDoubleLOW(double* addr);
+  // Set Double Higher / Lower word.
+  inline int32_t SetDoubleHIW(double* addr);
+  inline int32_t SetDoubleLOW(double* addr);
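+  //
+  // Example (assuming the semantics suggested by the names above and the
+  // little-endian -EL build used elsewhere in this patch):
+  //
+  //   double d = 1.0;     // bit pattern 0x3FF0000000000000
+  //   GetDoubleHIW(&d);   // 0x3FF00000 - upper 32 bits of the double
+  //   GetDoubleLOW(&d);   // 0x00000000 - lower 32 bits, at the lower address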
+
+
+  // Executing is handled based on the instruction type.
+  void DecodeTypeRegister(Instruction* instr);
+  void DecodeTypeImmediate(Instruction* instr);
+  void DecodeTypeJump(Instruction* instr);
+
+  // Used for breakpoints and traps.
+  void SoftwareInterrupt(Instruction* instr);
+
+  // Executes one instruction.
+  void InstructionDecode(Instruction* instr);
+  // Execute one instruction placed in a branch delay slot.
+  void BranchDelayInstructionDecode(Instruction* instr) {
+    if (instr->IsForbiddenInBranchDelay()) {
+      V8_Fatal(__FILE__, __LINE__,
+               "Eror:Unexpected %i opcode in a branch delay slot.",
+               instr->OpcodeField());
+    }
+    InstructionDecode(instr);
+  }
+
+  enum Exception {
+    none,
+    kIntegerOverflow,
+    kIntegerUnderflow,
+    kDivideByZero,
+    kNumExceptions
+  };
+  int16_t exceptions[kNumExceptions];
+
+  // Exceptions.
+  void SignalExceptions();
+
+  // Runtime call support.
+  static void* RedirectExternalReference(void* external_function,
+                                         bool fp_return);
+
+  // Used for runtime calls that take two double values as arguments and
+  // return a double.
+  void SetFpResult(double result);
+
+  // Architecture state.
+  // Registers.
+  int32_t registers_[kNumSimuRegisters];
+  // Coprocessor Registers.
+  int32_t FPUregisters_[kNumFPURegisters];
+
+  // Simulator support.
+  char* stack_;
+  bool pc_modified_;
+  int icount_;
+  static bool initialized_;
+
+  // Registered breakpoints.
+  Instruction* break_pc_;
+  Instr break_instr_;
+};
+
+} }   // namespace assembler::mips
+
+
+// The simulator has its own stack. Thus it has a different stack limit from
+// the C-based native code.  Setting the c_limit to indicate a very small
+// stack will not cause stack overflow errors, since the simulator ignores
+// the input.
+// This is unlikely to be an issue in practice, though it might cause testing
+// trouble down the line.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+    return assembler::mips::Simulator::current()->StackLimit();
+  }
+
+  static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+    assembler::mips::Simulator* sim = assembler::mips::Simulator::current();
+    return sim->PushAddress(try_catch_address);
+  }
+
+  static inline void UnregisterCTryCatch() {
+    assembler::mips::Simulator::current()->PopAddress();
+  }
+};
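+
+// Usage sketch (illustration only; the real callers are elsewhere in V8 and
+// not part of this diff): with the simulator active, the C stack limit that
+// is passed in is simply ignored.
+//
+//   uintptr_t js_limit = SimulatorStack::JsLimitFromCLimit(c_limit);
+//   // js_limit == assembler::mips::Simulator::current()->StackLimit()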
+
+#endif  // defined(__mips)
+
+#endif  // V8_MIPS_SIMULATOR_MIPS_H_
+
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
new file mode 100644
index 0000000..a87a49b
--- /dev/null
+++ b/src/mips/stub-cache-mips.cc
@@ -0,0 +1,384 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ic-inl.h"
+#include "codegen-inl.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm,
+                              Code::Flags flags,
+                              Register receiver,
+                              Register name,
+                              Register scratch,
+                              Register extra) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
+                                                       int index,
+                                                       Register prototype) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// Load a fast property out of a holder object (src). In-object properties
+// are loaded directly, otherwise the property is loaded from the properties
+// fixed array.
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+                                            Register dst, Register src,
+                                            JSObject* holder, int index) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
+                                           Register receiver,
+                                           Register scratch,
+                                           Label* miss_label) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// Generate code to load the length from a string object and return the length.
+// If the receiver object is not a string or a wrapped string object the
+// execution continues at the miss label. The register containing the
+// receiver is potentially clobbered.
+void StubCompiler::GenerateLoadStringLength2(MacroAssembler* masm,
+                                             Register receiver,
+                                             Register scratch1,
+                                             Register scratch2,
+                                             Label* miss) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x249);
+}
+
+
+void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
+                                                 Register receiver,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* miss_label) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// Generate StoreField code, value is passed in r0 register.
+// After executing generated code, the receiver_reg and name_reg
+// may be clobbered.
+void StubCompiler::GenerateStoreField(MacroAssembler* masm,
+                                      Builtins::Name storage_extend,
+                                      JSObject* object,
+                                      int index,
+                                      Map* transition,
+                                      Register receiver_reg,
+                                      Register name_reg,
+                                      Register scratch,
+                                      Label* miss_label) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Register StubCompiler::CheckPrototypes(JSObject* object,
+                                       Register object_reg,
+                                       JSObject* holder,
+                                       Register holder_reg,
+                                       Register scratch,
+                                       String* name,
+                                       Label* miss) {
+  UNIMPLEMENTED_MIPS();
+  return at;    // UNIMPLEMENTED RETURN
+}
+
+
+void StubCompiler::GenerateLoadField(JSObject* object,
+                                     JSObject* holder,
+                                     Register receiver,
+                                     Register scratch1,
+                                     Register scratch2,
+                                     int index,
+                                     String* name,
+                                     Label* miss) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StubCompiler::GenerateLoadConstant(JSObject* object,
+                                        JSObject* holder,
+                                        Register receiver,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Object* value,
+                                        String* name,
+                                        Label* miss) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+bool StubCompiler::GenerateLoadCallback(JSObject* object,
+                                        JSObject* holder,
+                                        Register receiver,
+                                        Register name_reg,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        AccessorInfo* callback,
+                                        String* name,
+                                        Label* miss,
+                                        Failure** failure) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x470);
+  return false;   // UNIMPLEMENTED RETURN
+}
+
+
+void StubCompiler::GenerateLoadInterceptor(JSObject* object,
+                                           JSObject* holder,
+                                           LookupResult* lookup,
+                                           Register receiver,
+                                           Register name_reg,
+                                           Register scratch1,
+                                           Register scratch2,
+                                           String* name,
+                                           Label* miss) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x505);
+}
+
+
+Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* CallStubCompiler::CompileCallField(Object* object,
+                                           JSObject* holder,
+                                           int index,
+                                           String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* CallStubCompiler::CompileCallConstant(Object* object,
+                                              JSObject* holder,
+                                              JSFunction* function,
+                                              String* name,
+                                              CheckType check) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+                                                 JSObject* holder,
+                                                 String* name) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x782);
+  return GetCode(INTERCEPTOR, name);
+}
+
+
+Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
+                                            GlobalObject* holder,
+                                            JSGlobalPropertyCell* cell,
+                                            JSFunction* function,
+                                            String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* StoreStubCompiler::CompileStoreField(JSObject* object,
+                                             int index,
+                                             Map* transition,
+                                             String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
+                                                AccessorInfo* callback,
+                                                String* name) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x906);
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
+                                                   String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
+                                              JSGlobalPropertyCell* cell,
+                                              String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* LoadStubCompiler::CompileLoadField(JSObject* object,
+                                           JSObject* holder,
+                                           int index,
+                                           String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* LoadStubCompiler::CompileLoadCallback(String* name,
+                                              JSObject* object,
+                                              JSObject* holder,
+                                              AccessorInfo* callback) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
+                                              JSObject* holder,
+                                              Object* value,
+                                              String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
+                                                 JSObject* holder,
+                                                 String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
+                                            GlobalObject* holder,
+                                            JSGlobalPropertyCell* cell,
+                                            String* name,
+                                            bool is_dont_delete) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
+                                                JSObject* receiver,
+                                                JSObject* holder,
+                                                int index) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
+                                                   JSObject* receiver,
+                                                   JSObject* holder,
+                                                   AccessorInfo* callback) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+                                                   JSObject* receiver,
+                                                   JSObject* holder,
+                                                   Object* value) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+                                                      JSObject* holder,
+                                                      String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+// TODO(1224671): implement the fast case.
+Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+                                                  int index,
+                                                  Map* transition,
+                                                  String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* ConstructStubCompiler::CompileConstructStub(
+    SharedFunctionInfo* shared) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/virtual-frame-mips.cc b/src/mips/virtual-frame-mips.cc
new file mode 100644
index 0000000..fad7ec4
--- /dev/null
+++ b/src/mips/virtual-frame-mips.cc
@@ -0,0 +1,240 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "register-allocator-inl.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// VirtualFrame implementation.
+
+#define __ ACCESS_MASM(masm())
+
+
+// On entry to a function, the virtual frame already contains the
+// receiver and the parameters.  All initial frame elements are in
+// memory.
+VirtualFrame::VirtualFrame()
+    : elements_(parameter_count() + local_count() + kPreallocatedElements),
+      stack_pointer_(parameter_count()) {  // 0-based index of TOS.
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::SyncElementBelowStackPointer(int index) {
+  UNREACHABLE();
+}
+
+
+void VirtualFrame::SyncElementByPushing(int index) {
+  UNREACHABLE();
+}
+
+
+void VirtualFrame::SyncRange(int begin, int end) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::MergeTo(VirtualFrame* expected) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::Enter() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::Exit() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::AllocateStackSlots() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::SaveContextRegister() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::RestoreContextRegister() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::PushReceiverSlotAddress() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+int VirtualFrame::InvalidateFrameSlotAt(int index) {
+  return kIllegalIndex;
+}
+
+
+void VirtualFrame::TakeFrameSlotAt(int index) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::StoreToFrameSlotAt(int index) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::PushTryHandler(HandlerType type) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::RawCallStub(CodeStub* stub) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallAlignedRuntime(Runtime::Function* f, int arg_count) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallAlignedRuntime(Runtime::FunctionId id, int arg_count) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
+                                 InvokeJSFlags flags,
+                                 Result* arg_count_register,
+                                 int arg_count) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::RawCallCodeObject(Handle<Code> code,
+                                       RelocInfo::Mode rmode) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallCodeObject(Handle<Code> code,
+                                  RelocInfo::Mode rmode,
+                                  int dropped_args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallCodeObject(Handle<Code> code,
+                                  RelocInfo::Mode rmode,
+                                  Result* arg,
+                                  int dropped_args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallCodeObject(Handle<Code> code,
+                                  RelocInfo::Mode rmode,
+                                  Result* arg0,
+                                  Result* arg1,
+                                  int dropped_args,
+                                  bool set_auto_args_slots) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::Drop(int count) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::DropFromVFrameOnly(int count) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+Result VirtualFrame::Pop() {
+  UNIMPLEMENTED_MIPS();
+  Result res = Result();
+  return res;    // UNIMPLEMENTED RETURN
+}
+
+
+void VirtualFrame::EmitPop(Register reg) {
+  UNIMPLEMENTED_MIPS();
+}
+
+void VirtualFrame::EmitMultiPop(RegList regs) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::EmitPush(Register reg) {
+  UNIMPLEMENTED_MIPS();
+}
+
+void VirtualFrame::EmitMultiPush(RegList regs) {
+  UNIMPLEMENTED_MIPS();
+}
+
+void VirtualFrame::EmitArgumentSlots(RegList reglist) {
+  UNIMPLEMENTED_MIPS();
+}
+
+#undef __
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/virtual-frame-mips.h b/src/mips/virtual-frame-mips.h
new file mode 100644
index 0000000..79f973f
--- /dev/null
+++ b/src/mips/virtual-frame-mips.h
@@ -0,0 +1,548 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_MIPS_VIRTUAL_FRAME_MIPS_H_
+#define V8_MIPS_VIRTUAL_FRAME_MIPS_H_
+
+#include "register-allocator.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+
+// -------------------------------------------------------------------------
+// Virtual frames
+//
+// The virtual frame is an abstraction of the physical stack frame.  It
+// encapsulates the parameters, frame-allocated locals, and the expression
+// stack.  It supports push/pop operations on the expression stack, as well
+// as random access to the expression stack elements, locals, and
+// parameters.
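+//
+// As a rough usage sketch (hypothetical snippet: the MIPS code generator is
+// still unimplemented in this patch, and frame_ stands for a code generator's
+// current VirtualFrame), client code manipulates the frame rather than the
+// stack pointer directly:
+//
+//   frame_->EmitPush(a0);   // push the value in a0 onto the expression stack
+//   frame_->Dup();          // duplicate the top element
+//   frame_->EmitPop(a1);    // pop the copy into a1
+//   frame_->Drop(1);        // drop the remaining element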
+
+class VirtualFrame : public ZoneObject {
+ public:
+  // A utility class to introduce a scope where the virtual frame is
+  // expected to remain spilled.  The constructor spills the code
+  // generator's current frame, but no attempt is made to require it
+  // to stay spilled.  It is intended as documentation while the code
+  // generator is being transformed.
+  class SpilledScope BASE_EMBEDDED {
+   public:
+    SpilledScope() {}
+  };
+
+  // An illegal index into the virtual frame.
+  static const int kIllegalIndex = -1;
+
+  // Construct an initial virtual frame on entry to a JS function.
+  VirtualFrame();
+
+  // Construct a virtual frame as a clone of an existing one.
+  explicit VirtualFrame(VirtualFrame* original);
+
+  CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+  MacroAssembler* masm() { return cgen()->masm(); }
+
+  // Create a duplicate of an existing valid frame element.
+  FrameElement CopyElementAt(int index);
+
+  // The number of elements on the virtual frame.
+  int element_count() { return elements_.length(); }
+
+  // The height of the virtual expression stack.
+  int height() {
+    return element_count() - expression_base_index();
+  }
+
+  int register_location(int num) {
+    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+    return register_locations_[num];
+  }
+
+  int register_location(Register reg) {
+    return register_locations_[RegisterAllocator::ToNumber(reg)];
+  }
+
+  void set_register_location(Register reg, int index) {
+    register_locations_[RegisterAllocator::ToNumber(reg)] = index;
+  }
+
+  bool is_used(int num) {
+    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+    return register_locations_[num] != kIllegalIndex;
+  }
+
+  bool is_used(Register reg) {
+    return register_locations_[RegisterAllocator::ToNumber(reg)]
+        != kIllegalIndex;
+  }
+
+  // Add extra in-memory elements to the top of the frame to match an actual
+  // frame (eg, the frame after an exception handler is pushed).  No code is
+  // emitted.
+  void Adjust(int count);
+
+  // Forget elements from the top of the frame to match an actual frame (eg,
+  // the frame after a runtime call).  No code is emitted.
+  void Forget(int count) {
+    ASSERT(count >= 0);
+    ASSERT(stack_pointer_ == element_count() - 1);
+    stack_pointer_ -= count;
+    // On MIPS, all elements are in memory, so there is no extra bookkeeping
+    // (registers, copies, etc.) beyond dropping the elements.
+    elements_.Rewind(stack_pointer_ + 1);
+  }
+
+  // Forget count elements from the top of the frame and adjust the stack
+  // pointer downward.  This is used, for example, before merging frames at
+  // break, continue, and return targets.
+  void ForgetElements(int count);
+
+  // Spill all values from the frame to memory.
+  void SpillAll();
+
+  // Spill all occurrences of a specific register from the frame.
+  void Spill(Register reg) {
+    if (is_used(reg)) SpillElementAt(register_location(reg));
+  }
+
+  // Spill all occurrences of an arbitrary register if possible.  Return the
+  // register spilled or no_reg if it was not possible to free any register
+  // (ie, they all have frame-external references).
+  Register SpillAnyRegister();
+
+  // Prepare this virtual frame for merging to an expected frame by
+  // performing some state changes that do not require generating
+  // code.  It is guaranteed that no code will be generated.
+  void PrepareMergeTo(VirtualFrame* expected);
+
+  // Make this virtual frame have a state identical to an expected virtual
+  // frame.  As a side effect, code may be emitted to make this frame match
+  // the expected one.
+  void MergeTo(VirtualFrame* expected);
+
+  // Detach a frame from its code generator, perhaps temporarily.  This
+  // tells the register allocator that it is free to use frame-internal
+  // registers.  Used when the code generator's frame is switched from this
+  // one to NULL by an unconditional jump.
+  void DetachFromCodeGenerator() {
+    RegisterAllocator* cgen_allocator = cgen()->allocator();
+    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+      if (is_used(i)) cgen_allocator->Unuse(i);
+    }
+  }
+
+  // (Re)attach a frame to its code generator.  This informs the register
+  // allocator that the frame-internal register references are active again.
+  // Used when a code generator's frame is switched from NULL to this one by
+  // binding a label.
+  void AttachToCodeGenerator() {
+    RegisterAllocator* cgen_allocator = cgen()->allocator();
+    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+      if (is_used(i)) cgen_allocator->Use(i);
+    }
+  }
+
+  // Emit code for the physical JS entry and exit frame sequences.  After
+  // calling Enter, the virtual frame is ready for use; and after calling
+  // Exit it should not be used.  Note that Enter does not allocate space in
+  // the physical frame for storing frame-allocated locals.
+  void Enter();
+  void Exit();
+
+  // Prepare for returning from the frame by spilling locals and
+  // dropping all non-locals elements in the virtual frame.  This
+  // avoids generating unnecessary merge code when jumping to the
+  // shared return site.  Emits code for spills.
+  void PrepareForReturn();
+
+  // Allocate and initialize the frame-allocated locals.
+  void AllocateStackSlots();
+
+  // The current top of the expression stack as an assembly operand.
+  MemOperand Top() { return MemOperand(sp, 0); }
+
+  // An element of the expression stack as an assembly operand.
+  MemOperand ElementAt(int index) {
+    return MemOperand(sp, index * kPointerSize);
+  }
+
+  // Random-access store to a frame-top relative frame element.  The result
+  // becomes owned by the frame and is invalidated.
+  void SetElementAt(int index, Result* value);
+
+  // Set a frame element to a constant.  The index is frame-top relative.
+  void SetElementAt(int index, Handle<Object> value) {
+    Result temp(value);
+    SetElementAt(index, &temp);
+  }
+
+  void PushElementAt(int index) {
+    PushFrameSlotAt(element_count() - index - 1);
+  }
+
+  // A frame-allocated local as an assembly operand.
+  MemOperand LocalAt(int index) {
+    ASSERT(0 <= index);
+    ASSERT(index < local_count());
+    return MemOperand(s8_fp, kLocal0Offset - index * kPointerSize);
+  }
+
+  // Push a copy of the value of a local frame slot on top of the frame.
+  void PushLocalAt(int index) {
+    PushFrameSlotAt(local0_index() + index);
+  }
+
+  // Push the value of a local frame slot on top of the frame and invalidate
+  // the local slot.  The slot should be written to before trying to read
+  // from it again.
+  void TakeLocalAt(int index) {
+    TakeFrameSlotAt(local0_index() + index);
+  }
+
+  // Store the top value on the virtual frame into a local frame slot.  The
+  // value is left in place on top of the frame.
+  void StoreToLocalAt(int index) {
+    StoreToFrameSlotAt(local0_index() + index);
+  }
+
+  // Push the address of the receiver slot on the frame.
+  void PushReceiverSlotAddress();
+
+  // The function frame slot.
+  MemOperand Function() { return MemOperand(s8_fp, kFunctionOffset); }
+
+  // Push the function on top of the frame.
+  void PushFunction() { PushFrameSlotAt(function_index()); }
+
+  // The context frame slot.
+  MemOperand Context() { return MemOperand(s8_fp, kContextOffset); }
+
+  // Save the value of the cp register to the context frame slot.
+  void SaveContextRegister();
+
+  // Restore the cp register from the value of the context frame
+  // slot.
+  void RestoreContextRegister();
+
+  // A parameter as an assembly operand.
+  MemOperand ParameterAt(int index) {
+    // Index -1 corresponds to the receiver.
+    ASSERT(-1 <= index);  // -1 is the receiver.
+    ASSERT(index <= parameter_count());
+    uint16_t a = 0;   // Number of argument slots.
+    return MemOperand(s8_fp,
+                      (1 + parameter_count() + a - index) * kPointerSize);
+  }
+
+  // Push a copy of the value of a parameter frame slot on top of the frame.
+  void PushParameterAt(int index) {
+    PushFrameSlotAt(param0_index() + index);
+  }
+
+  // Push the value of a parameter frame slot on top of the frame and
+  // invalidate the parameter slot.  The slot should be written to before
+  // trying to read from it again.
+  void TakeParameterAt(int index) {
+    TakeFrameSlotAt(param0_index() + index);
+  }
+
+  // Store the top value on the virtual frame into a parameter frame slot.
+  // The value is left in place on top of the frame.
+  void StoreToParameterAt(int index) {
+    StoreToFrameSlotAt(param0_index() + index);
+  }
+
+  // The receiver frame slot.
+  MemOperand Receiver() { return ParameterAt(-1); }
+
+  // Push a try-catch or try-finally handler on top of the virtual frame.
+  void PushTryHandler(HandlerType type);
+
+  // Call stub given the number of arguments it expects on (and
+  // removes from) the stack.
+  void CallStub(CodeStub* stub, int arg_count) {
+    PrepareForCall(arg_count, arg_count);
+    RawCallStub(stub);
+  }
+
+  // Call stub that expects its argument in r0.  The argument is given
+  // as a result which must be the register r0.
+  void CallStub(CodeStub* stub, Result* arg);
+
+  // Call stub that expects its arguments in r1 and r0.  The arguments
+  // are given as results which must be the appropriate registers.
+  void CallStub(CodeStub* stub, Result* arg0, Result* arg1);
+
+  // Call runtime given the number of arguments expected on (and
+  // removed from) the stack.
+  void CallRuntime(Runtime::Function* f, int arg_count);
+  void CallRuntime(Runtime::FunctionId id, int arg_count);
+
+  // Call runtime with sp aligned to 8 bytes.
+  void CallAlignedRuntime(Runtime::Function* f, int arg_count);
+  void CallAlignedRuntime(Runtime::FunctionId id, int arg_count);
+
+  // Invoke builtin given the number of arguments it expects on (and
+  // removes from) the stack.
+  void InvokeBuiltin(Builtins::JavaScript id,
+                     InvokeJSFlags flag,
+                     Result* arg_count_register,
+                     int arg_count);
+
+  // Call into an IC stub given the number of arguments it removes
+  // from the stack.  Register arguments are passed as results and
+  // consumed by the call.
+  void CallCodeObject(Handle<Code> ic,
+                      RelocInfo::Mode rmode,
+                      int dropped_args);
+  void CallCodeObject(Handle<Code> ic,
+                      RelocInfo::Mode rmode,
+                      Result* arg,
+                      int dropped_args);
+  void CallCodeObject(Handle<Code> ic,
+                      RelocInfo::Mode rmode,
+                      Result* arg0,
+                      Result* arg1,
+                      int dropped_args,
+                      bool set_auto_args_slots = false);
+
+  // Drop a number of elements from the top of the expression stack.  May
+  // emit code to affect the physical frame.  Does not clobber any registers
+  // excepting possibly the stack pointer.
+  void Drop(int count);
+  // Similar to VirtualFrame::Drop but we don't modify the actual stack.
+  // This is because we need to manually restore sp to the correct position.
+  void DropFromVFrameOnly(int count);
+
+  // Drop one element.
+  void Drop() { Drop(1); }
+  void DropFromVFrameOnly() { DropFromVFrameOnly(1); }
+
+  // Duplicate the top element of the frame.
+  void Dup() { PushFrameSlotAt(element_count() - 1); }
+
+  // Pop an element from the top of the expression stack.  Returns a
+  // Result, which may be a constant or a register.
+  Result Pop();
+
+  // Pop and save an element from the top of the expression stack and
+  // emit a corresponding pop instruction.
+  void EmitPop(Register reg);
+  // Same but for multiple registers
+  void EmitMultiPop(RegList regs);  // higher indexed registers popped first
+  void EmitMultiPopReversed(RegList regs);  // lower first
+
+  // Push an element on top of the expression stack and emit a
+  // corresponding push instruction.
+  void EmitPush(Register reg);
+  // Same but for multiple registers.
+  void EmitMultiPush(RegList regs);  // lower indexed registers are pushed first
+  void EmitMultiPushReversed(RegList regs);  // higher first
+
+  // Push an element on the virtual frame.
+  void Push(Register reg);
+  void Push(Handle<Object> value);
+  void Push(Smi* value) { Push(Handle<Object>(value)); }
+
+  // Pushing a result invalidates it (its contents become owned by the frame).
+  void Push(Result* result) {
+    if (result->is_register()) {
+      Push(result->reg());
+    } else {
+      ASSERT(result->is_constant());
+      Push(result->handle());
+    }
+    result->Unuse();
+  }
+
+  // Nip removes zero or more elements from immediately below the top
+  // of the frame, leaving the previous top-of-frame value on top of
+  // the frame.  Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
+  void Nip(int num_dropped);
+
+  // This pushes 4 argument slots on the stack and saves the requested 'a'
+  // registers. The 'a' registers are the argument registers a0 to a3.
+  void EmitArgumentSlots(RegList reglist);
+
+ private:
+  static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
+  static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
+  static const int kContextOffset = StandardFrameConstants::kContextOffset;
+
+  static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
+  static const int kPreallocatedElements = 5 + 8;  // 8 expression stack slots.
+
+  ZoneList<FrameElement> elements_;
+
+  // The index of the element that is at the processor's stack pointer
+  // (the sp register).
+  int stack_pointer_;
+
+  // The index of the register frame element using each register, or
+  // kIllegalIndex if a register is not on the frame.
+  int register_locations_[RegisterAllocator::kNumRegisters];
+
+  // The number of frame-allocated locals and parameters respectively.
+  int parameter_count() { return cgen()->scope()->num_parameters(); }
+  int local_count() { return cgen()->scope()->num_stack_slots(); }
+
+  // The index of the element that is at the processor's frame pointer
+  // (the fp register).  The parameters, receiver, function, and context
+  // are below the frame pointer.
+  int frame_pointer() { return parameter_count() + 3; }
+
+  // The index of the first parameter.  The receiver lies below the first
+  // parameter.
+  int param0_index() { return 1; }
+
+  // The index of the context slot in the frame.  It is immediately
+  // below the frame pointer.
+  int context_index() { return frame_pointer() - 1; }
+
+  // The index of the function slot in the frame.  It is below the frame
+  // pointer and context slot.
+  int function_index() { return frame_pointer() - 2; }
+
+  // The index of the first local.  Between the frame pointer and the
+  // locals lies the return address.
+  int local0_index() { return frame_pointer() + 2; }
+
+  // The index of the base of the expression stack.
+  int expression_base_index() { return local0_index() + local_count(); }
+
+  // Convert a frame index into a frame pointer relative offset into the
+  // actual stack.
+  int fp_relative(int index) {
+    ASSERT(index < element_count());
+    ASSERT(frame_pointer() < element_count());  // FP is on the frame.
+    return (frame_pointer() - index) * kPointerSize;
+  }
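+
+  // A worked illustration of the layout implied by the helpers above,
+  // assuming parameter_count() == 2 and local_count() == 1:
+  //   index 0      receiver
+  //   index 1..2   parameters      (param0_index() == 1)
+  //   index 3      function        (function_index() == frame_pointer() - 2)
+  //   index 4      context         (context_index() == frame_pointer() - 1)
+  //   index 5      frame pointer   (frame_pointer() == parameter_count() + 3)
+  //   index 6      return address
+  //   index 7      local 0         (local0_index() == frame_pointer() + 2)
+  //   index 8      expression stack base  (expression_base_index() == 8)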
+
+  // Record an occurrence of a register in the virtual frame.  This has the
+  // effect of incrementing the register's external reference count and
+  // of updating the index of the register's location in the frame.
+  void Use(Register reg, int index) {
+    ASSERT(!is_used(reg));
+    set_register_location(reg, index);
+    cgen()->allocator()->Use(reg);
+  }
+
+  // Record that a register reference has been dropped from the frame.  This
+  // decrements the register's external reference count and invalidates the
+  // index of the register's location in the frame.
+  void Unuse(Register reg) {
+    ASSERT(is_used(reg));
+    set_register_location(reg, kIllegalIndex);
+    cgen()->allocator()->Unuse(reg);
+  }
+
+  // Spill the element at a particular index---write it to memory if
+  // necessary, free any associated register, and forget its value if
+  // constant.
+  void SpillElementAt(int index);
+
+  // Sync the element at a particular index.  If it is a register or
+  // constant that disagrees with the value on the stack, write it to memory.
+  // Keep the element type as register or constant, and clear the dirty bit.
+  void SyncElementAt(int index);
+
+  // Sync the range of elements in [begin, end] with memory.
+  void SyncRange(int begin, int end);
+
+  // Sync a single unsynced element that lies beneath or at the stack pointer.
+  void SyncElementBelowStackPointer(int index);
+
+  // Sync a single unsynced element that lies just above the stack pointer.
+  void SyncElementByPushing(int index);
+
+  // Push a copy of a frame slot (typically a local or parameter) on top of
+  // the frame.
+  void PushFrameSlotAt(int index);
+
+  // Push the value of a frame slot (typically a local or parameter) on
+  // top of the frame and invalidate the slot.
+  void TakeFrameSlotAt(int index);
+
+  // Store the value on top of the frame to a frame slot (typically a local
+  // or parameter).
+  void StoreToFrameSlotAt(int index);
+
+  // Spill all elements in registers. Spill the top spilled_args elements
+  // on the frame.  Sync all other frame elements.
+  // Then drop dropped_args elements from the virtual frame, to match
+  // the effect of an upcoming call that will drop them from the stack.
+  void PrepareForCall(int spilled_args, int dropped_args);
+
+  // Move frame elements currently in registers or constants, that
+  // should be in memory in the expected frame, to memory.
+  void MergeMoveRegistersToMemory(VirtualFrame* expected);
+
+  // Make the register-to-register moves necessary to
+  // merge this frame with the expected frame.
+  // Register to memory moves must already have been made,
+  // and memory to register moves must follow this call.
+  // This is because some new memory-to-register moves are
+  // created in order to break cycles of register moves.
+  // Used in the implementation of MergeTo().
+  void MergeMoveRegistersToRegisters(VirtualFrame* expected);
+
+  // Make the memory-to-register and constant-to-register moves
+  // needed to make this frame equal the expected frame.
+  // Called after all register-to-memory and register-to-register
+  // moves have been made.  After this function returns, the frames
+  // should be equal.
+  void MergeMoveMemoryToRegisters(VirtualFrame* expected);
+
+  // Invalidates a frame slot (puts an invalid frame element in it).
+  // Copies on the frame are correctly handled, and if this slot was
+  // the backing store of copies, the index of the new backing store
+  // is returned.  Otherwise, returns kIllegalIndex.
+  // Register counts are correctly updated.
+  int InvalidateFrameSlotAt(int index);
+
+  // Call a code stub that has already been prepared for calling (via
+  // PrepareForCall).
+  void RawCallStub(CodeStub* stub);
+
+  // Calls a code object which has already been prepared for calling
+  // (via PrepareForCall).
+  void RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
+
+  bool Equals(VirtualFrame* other);
+
+  // Classes that need raw access to the elements_ array.
+  friend class DeferredCode;
+  friend class JumpTarget;
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_VIRTUAL_FRAME_MIPS_H_
+
diff --git a/src/mirror-debugger.js b/src/mirror-debugger.js
new file mode 100644
index 0000000..3b34797
--- /dev/null
+++ b/src/mirror-debugger.js
@@ -0,0 +1,2327 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Handle id counters.
+var next_handle_ = 0;
+var next_transient_handle_ = -1;
+
+// Mirror cache.
+var mirror_cache_ = [];
+
+
+/**
+ * Clear the mirror handle cache.
+ */
+function ClearMirrorCache() {
+  next_handle_ = 0;
+  mirror_cache_ = [];
+}
+
+
+/**
+ * Returns the mirror for a specified value or object.
+ *
+ * @param {value or Object} value the value or object to retrieve the mirror for
+ * @param {boolean} opt_transient indicates whether this object is transient and
+ *    should not be added to the mirror cache. The default is not transient.
+ * @returns {Mirror} the mirror reflecting the passed value or object
+ */
+function MakeMirror(value, opt_transient) {
+  var mirror;
+
+  // Look for non-transient mirrors in the mirror cache.
+  if (!opt_transient) {
+    for (var id in mirror_cache_) {
+      mirror = mirror_cache_[id];
+      if (mirror.value() === value) {
+        return mirror;
+      }
+      // Special check for NaN as NaN == NaN is false.
+      if (mirror.isNumber() && isNaN(mirror.value()) &&
+          typeof value == 'number' && isNaN(value)) {
+        return mirror;
+      }
+    }
+  }
+  
+  if (IS_UNDEFINED(value)) {
+    mirror = new UndefinedMirror();
+  } else if (IS_NULL(value)) {
+    mirror = new NullMirror();
+  } else if (IS_BOOLEAN(value)) {
+    mirror = new BooleanMirror(value);
+  } else if (IS_NUMBER(value)) {
+    mirror = new NumberMirror(value);
+  } else if (IS_STRING(value)) {
+    mirror = new StringMirror(value);
+  } else if (IS_ARRAY(value)) {
+    mirror = new ArrayMirror(value);
+  } else if (IS_DATE(value)) {
+    mirror = new DateMirror(value);
+  } else if (IS_FUNCTION(value)) {
+    mirror = new FunctionMirror(value);
+  } else if (IS_REGEXP(value)) {
+    mirror = new RegExpMirror(value);
+  } else if (IS_ERROR(value)) {
+    mirror = new ErrorMirror(value);
+  } else if (IS_SCRIPT(value)) {
+    mirror = new ScriptMirror(value);
+  } else {
+    mirror = new ObjectMirror(value, OBJECT_TYPE, opt_transient);
+  }
+
+  mirror_cache_[mirror.handle()] = mirror;
+  return mirror;
+}
+
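+// Illustration only (not part of the mirror API): repeated calls for the same
+// non-transient value return the cached mirror, which can also be looked up
+// by its handle.
+//
+//   var m = MakeMirror([1, 2, 3]);
+//   m.isArray();                     // true
+//   LookupMirror(m.handle()) === m;  // true
+//   MakeMirror([1, 2, 3]) === m;     // false - a different array object
+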
+
+/**
+ * Returns the mirror for a specified mirror handle.
+ *
+ * @param {number} handle the handle to find the mirror for
+ * @returns {Mirror or undefined} the mirror with the requested handle or
+ *     undefined if no mirror with the requested handle was found
+ */
+function LookupMirror(handle) {
+  return mirror_cache_[handle];
+}
+
+  
+/**
+ * Returns the mirror for the undefined value.
+ *
+ * @returns {Mirror} the mirror reflecting the undefined value
+ */
+function GetUndefinedMirror() {
+  return MakeMirror(void 0);
+}
+
+
+/**
+ * Inherit the prototype methods from one constructor into another.
+ *
+ * The Function.prototype.inherits from lang.js rewritten as a standalone
+ * function (not on Function.prototype). NOTE: If this file is to be loaded
+ * during bootstrapping, this function needs to be rewritten using some native
+ * functions, as prototype setup using normal JavaScript does not work as
+ * expected during bootstrapping (see mirror.js in r114903).
+ *
+ * @param {function} ctor Constructor function which needs to inherit the
+ *     prototype
+ * @param {function} superCtor Constructor function to inherit prototype from
+ */
+function inherits(ctor, superCtor) {
+  var tempCtor = function(){};
+  tempCtor.prototype = superCtor.prototype;
+  ctor.super_ = superCtor.prototype;
+  ctor.prototype = new tempCtor();
+  ctor.prototype.constructor = ctor;
+}
+
+
+// Type names of the different mirrors.
+const UNDEFINED_TYPE = 'undefined';
+const NULL_TYPE = 'null';
+const BOOLEAN_TYPE = 'boolean';
+const NUMBER_TYPE = 'number';
+const STRING_TYPE = 'string';
+const OBJECT_TYPE = 'object';
+const FUNCTION_TYPE = 'function';
+const REGEXP_TYPE = 'regexp';
+const ERROR_TYPE = 'error';
+const PROPERTY_TYPE = 'property';
+const FRAME_TYPE = 'frame';
+const SCRIPT_TYPE = 'script';
+const CONTEXT_TYPE = 'context';
+const SCOPE_TYPE = 'scope';
+
+// Maximum length when sending strings through the JSON protocol.
+const kMaxProtocolStringLength = 80;
+
+// Different kind of properties.
+PropertyKind = {};
+PropertyKind.Named   = 1;
+PropertyKind.Indexed = 2;
+
+
+// A copy of the PropertyType enum from global.h
+PropertyType = {};
+PropertyType.Normal             = 0;
+PropertyType.Field              = 1;
+PropertyType.ConstantFunction   = 2;
+PropertyType.Callbacks          = 3;
+PropertyType.Interceptor        = 4;
+PropertyType.MapTransition      = 5;
+PropertyType.ConstantTransition = 6;
+PropertyType.NullDescriptor     = 7;
+
+
+// Different attributes for a property.
+PropertyAttribute = {};
+PropertyAttribute.None       = NONE;
+PropertyAttribute.ReadOnly   = READ_ONLY;
+PropertyAttribute.DontEnum   = DONT_ENUM;
+PropertyAttribute.DontDelete = DONT_DELETE;
+
+
+// A copy of the scope types from runtime.cc.
+ScopeType = { Global: 0,
+              Local: 1,
+              With: 2,
+              Closure: 3,
+              Catch: 4 };
+
+
+// Mirror hierarchy:
+//   - Mirror
+//     - ValueMirror
+//       - UndefinedMirror
+//       - NullMirror
+//       - NumberMirror
+//       - StringMirror
+//       - ObjectMirror
+//         - FunctionMirror
+//           - UnresolvedFunctionMirror
+//         - ArrayMirror
+//         - DateMirror
+//         - RegExpMirror
+//         - ErrorMirror
+//     - PropertyMirror
+//     - FrameMirror
+//     - ScriptMirror
+
+
+/**
+ * Base class for all mirror objects.
+ * @param {string} type The type of the mirror
+ * @constructor
+ */
+function Mirror(type) {
+  this.type_ = type;
+};
+
+
+Mirror.prototype.type = function() {
+  return this.type_;
+};
+
+
+/**
+ * Check whether the mirror reflects a value.
+ * @returns {boolean} True if the mirror reflects a value.
+ */
+Mirror.prototype.isValue = function() {
+  return this instanceof ValueMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects the undefined value.
+ * @returns {boolean} True if the mirror reflects the undefined value.
+ */
+Mirror.prototype.isUndefined = function() {
+  return this instanceof UndefinedMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects the null value.
+ * @returns {boolean} True if the mirror reflects the null value
+ */
+Mirror.prototype.isNull = function() {
+  return this instanceof NullMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects a boolean value.
+ * @returns {boolean} True if the mirror reflects a boolean value
+ */
+Mirror.prototype.isBoolean = function() {
+  return this instanceof BooleanMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects a number value.
+ * @returns {boolean} True if the mirror reflects a number value
+ */
+Mirror.prototype.isNumber = function() {
+  return this instanceof NumberMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects a string value.
+ * @returns {boolean} True if the mirror reflects a string value
+ */
+Mirror.prototype.isString = function() {
+  return this instanceof StringMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects an object.
+ * @returns {boolean} True if the mirror reflects an object
+ */
+Mirror.prototype.isObject = function() {
+  return this instanceof ObjectMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects a function.
+ * @returns {boolean} True if the mirror reflects a function
+ */
+Mirror.prototype.isFunction = function() {
+  return this instanceof FunctionMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects an unresolved function.
+ * @returns {boolean} True if the mirror reflects an unresolved function
+ */
+Mirror.prototype.isUnresolvedFunction = function() {
+  return this instanceof UnresolvedFunctionMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects an array.
+ * @returns {boolean} True if the mirror reflects an array
+ */
+Mirror.prototype.isArray = function() {
+  return this instanceof ArrayMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects a date.
+ * @returns {boolean} True if the mirror reflects a date
+ */
+Mirror.prototype.isDate = function() {
+  return this instanceof DateMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects a regular expression.
+ * @returns {boolean} True if the mirror reflects a regular expression
+ */
+Mirror.prototype.isRegExp = function() {
+  return this instanceof RegExpMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects an error.
+ * @returns {boolean} True if the mirror reflects an error
+ */
+Mirror.prototype.isError = function() {
+  return this instanceof ErrorMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects a property.
+ * @returns {boolean} True if the mirror reflects a property
+ */
+Mirror.prototype.isProperty = function() {
+  return this instanceof PropertyMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects a stack frame.
+ * @returns {boolean} True if the mirror reflects a stack frame
+ */
+Mirror.prototype.isFrame = function() {
+  return this instanceof FrameMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects a script.
+ * @returns {boolean} True if the mirror reflects a script
+ */
+Mirror.prototype.isScript = function() {
+  return this instanceof ScriptMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects a context.
+ * @returns {boolean} True if the mirror reflects a context
+ */
+Mirror.prototype.isContext = function() {
+  return this instanceof ContextMirror;
+}
+
+
+/**
+ * Check whether the mirror reflects a scope.
+ * @returns {boolean} True if the mirror reflects a scope
+ */
+Mirror.prototype.isScope = function() {
+  return this instanceof ScopeMirror;
+}
+
+
+/**
+ * Allocate a handle id for this object.
+ */
+Mirror.prototype.allocateHandle_ = function() {
+  this.handle_ = next_handle_++;
+}
+
+
+/**
+ * Allocate a transient handle id for this object. Transient handles are
+ * negative.
+ */
+Mirror.prototype.allocateTransientHandle_ = function() {
+  this.handle_ = next_transient_handle_--;
+}
+
+
+Mirror.prototype.toText = function() {
+  // Simple to-text representation used when there is no specialization in a subclass.
+  return "#<" + builtins.GetInstanceName(this.constructor.name) + ">";
+}
+
+
+/**
+ * Base class for all value mirror objects.
+ * @param {string} type The type of the mirror
+ * @param {value} value The value reflected by this mirror
+ * @param {boolean} transient Indicates whether this object is transient with a
+ *    transient handle
+ * @constructor
+ * @extends Mirror
+ */
+function ValueMirror(type, value, transient) {
+  Mirror.call(this, type);
+  this.value_ = value;
+  if (!transient) {
+    this.allocateHandle_();
+  } else {
+    this.allocateTransientHandle_();
+  }
+}
+inherits(ValueMirror, Mirror);
+
+
+Mirror.prototype.handle = function() {
+  return this.handle_;
+};
+
+
+/**
+ * Check whether this is a primitive value.
+ * @return {boolean} True if the mirror reflects a primitive value
+ */
+ValueMirror.prototype.isPrimitive = function() {
+  var type = this.type();
+  return type === 'undefined' ||
+         type === 'null' ||
+         type === 'boolean' ||
+         type === 'number' ||
+         type === 'string';
+};
+
+
+/**
+ * Get the actual value reflected by this mirror.
+ * @return {value} The value reflected by this mirror
+ */
+ValueMirror.prototype.value = function() {
+  return this.value_;
+};
+
+
+/**
+ * Mirror object for Undefined.
+ * @constructor
+ * @extends ValueMirror
+ */
+function UndefinedMirror() {
+  ValueMirror.call(this, UNDEFINED_TYPE, void 0);
+}
+inherits(UndefinedMirror, ValueMirror);
+
+
+UndefinedMirror.prototype.toText = function() {
+  return 'undefined';
+}
+
+
+/**
+ * Mirror object for null.
+ * @constructor
+ * @extends ValueMirror
+ */
+function NullMirror() {
+  ValueMirror.call(this, NULL_TYPE, null);
+}
+inherits(NullMirror, ValueMirror);
+
+
+NullMirror.prototype.toText = function() {
+  return 'null';
+}
+
+
+/**
+ * Mirror object for boolean values.
+ * @param {boolean} value The boolean value reflected by this mirror
+ * @constructor
+ * @extends ValueMirror
+ */
+function BooleanMirror(value) {
+  ValueMirror.call(this, BOOLEAN_TYPE, value);
+}
+inherits(BooleanMirror, ValueMirror);
+
+
+BooleanMirror.prototype.toText = function() {
+  return this.value_ ? 'true' : 'false';
+}
+
+
+/**
+ * Mirror object for number values.
+ * @param {number} value The number value reflected by this mirror
+ * @constructor
+ * @extends ValueMirror
+ */
+function NumberMirror(value) {
+  ValueMirror.call(this, NUMBER_TYPE, value);
+}
+inherits(NumberMirror, ValueMirror);
+
+
+NumberMirror.prototype.toText = function() {
+  return %NumberToString(this.value_);
+}
+
+
+/**
+ * Mirror object for string values.
+ * @param {string} value The string value reflected by this mirror
+ * @constructor
+ * @extends ValueMirror
+ */
+function StringMirror(value) {
+  ValueMirror.call(this, STRING_TYPE, value);
+}
+inherits(StringMirror, ValueMirror);
+
+
+StringMirror.prototype.length = function() {
+  return this.value_.length;
+};
+
+
+StringMirror.prototype.toText = function() {
+  if (this.length() > kMaxProtocolStringLength) {
+    return this.value_.substring(0, kMaxProtocolStringLength) +
+           '... (length: ' + this.length() + ')';
+  } else {
+    return this.value_;
+  }
+}
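+
+// Illustrative example (not part of the original change): with
+// kMaxProtocolStringLength = 80, toText() on a 100 character string returns
+// its first 80 characters followed by '... (length: 100)'.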
+
+
+/**
+ * Mirror object for objects.
+ * @param {object} value The object reflected by this mirror
+ * @param {boolean} transient Indicates whether this object is transient with a
+ *    transient handle
+ * @constructor
+ * @extends ValueMirror
+ */
+function ObjectMirror(value, type, transient) {
+  ValueMirror.call(this, type || OBJECT_TYPE, value, transient);
+}
+inherits(ObjectMirror, ValueMirror);
+
+
+ObjectMirror.prototype.className = function() {
+  return %_ClassOf(this.value_);
+};
+
+
+ObjectMirror.prototype.constructorFunction = function() {
+  return MakeMirror(%DebugGetProperty(this.value_, 'constructor'));
+};
+
+
+ObjectMirror.prototype.prototypeObject = function() {
+  return MakeMirror(%DebugGetProperty(this.value_, 'prototype'));
+};
+
+
+ObjectMirror.prototype.protoObject = function() {
+  return MakeMirror(%DebugGetPrototype(this.value_));
+};
+
+
+ObjectMirror.prototype.hasNamedInterceptor = function() {
+  // Get information on interceptors for this object.
+  var x = %GetInterceptorInfo(this.value_);
+  return (x & 2) != 0;
+};
+
+
+ObjectMirror.prototype.hasIndexedInterceptor = function() {
+  // Get information on interceptors for this object.
+  var x = %GetInterceptorInfo(this.value_);
+  return (x & 1) != 0;
+};
+
+
+/**
+ * Return the property names for this object.
+ * @param {number} kind Indicate whether named, indexed or both kinds of
+ *     properties are requested
+ * @param {number} limit Limit the number of names returned to the specified
+ *     value
+ * @return {Array} Property names for this object
+ */
+ObjectMirror.prototype.propertyNames = function(kind, limit) {
+  // Find kind and limit and allocate array for the result
+  kind = kind || PropertyKind.Named | PropertyKind.Indexed;
+
+  var propertyNames;
+  var elementNames;
+  var total = 0;
+  
+  // Find all the named properties.
+  if (kind & PropertyKind.Named) {
+    // Get the local property names.
+    propertyNames = %GetLocalPropertyNames(this.value_);
+    total += propertyNames.length;
+
+    // Get names for named interceptor properties if any.
+    if (this.hasNamedInterceptor() && (kind & PropertyKind.Named)) {
+      var namedInterceptorNames =
+          %GetNamedInterceptorPropertyNames(this.value_);
+      if (namedInterceptorNames) {
+        propertyNames = propertyNames.concat(namedInterceptorNames);
+        total += namedInterceptorNames.length;
+      }
+    }
+  }
+
+  // Find all the indexed properties.
+  if (kind & PropertyKind.Indexed) {
+    // Get the local element names.
+    elementNames = %GetLocalElementNames(this.value_);
+    total += elementNames.length;
+
+    // Get names for indexed interceptor properties.
+    if (this.hasIndexedInterceptor() && (kind & PropertyKind.Indexed)) {
+      var indexedInterceptorNames =
+          %GetIndexedInterceptorElementNames(this.value_);
+      if (indexedInterceptorNames) {
+        elementNames = elementNames.concat(indexedInterceptorNames);
+        total += indexedInterceptorNames.length;
+      }
+    }
+  }
+  limit = Math.min(limit || total, total);
+
+  var names = new Array(limit);
+  var index = 0;
+
+  // Copy names for named properties.
+  if (kind & PropertyKind.Named) {
+    for (var i = 0; index < limit && i < propertyNames.length; i++) {
+      names[index++] = propertyNames[i];
+    }
+  }
+
+  // Copy names for indexed properties.
+  if (kind & PropertyKind.Indexed) {
+    for (var i = 0; index < limit && i < elementNames.length; i++) {
+      names[index++] = elementNames[i];
+    }
+  }
+
+  return names;
+};
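+
+// Illustrative usage (not part of the original change): kind is a bit mask of
+// PropertyKind values and limit caps the number of names returned, e.g.
+//
+//   var objMirror = MakeMirror({a: 1, b: 2});
+//   objMirror.propertyNames(PropertyKind.Named, 1);  // at most one named name
+//   objMirror.propertyNames();  // all named and indexed property names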
+
+
+/**
+ * Return the properties for this object as an array of PropertyMirror objects.
+ * @param {number} kind Indicate whether named, indexed or both kinds of
+ *     properties are requested
+ * @param {number} limit Limit the number of properties returned to the
+ *     specified value
+ * @return {Array} Property mirrors for this object
+ */
+ObjectMirror.prototype.properties = function(kind, limit) {
+  var names = this.propertyNames(kind, limit);
+  var properties = new Array(names.length);
+  for (var i = 0; i < names.length; i++) {
+    properties[i] = this.property(names[i]);
+  }
+
+  return properties;
+};
+
+
+ObjectMirror.prototype.property = function(name) {
+  var details = %DebugGetPropertyDetails(this.value_, %ToString(name));
+  if (details) {
+    return new PropertyMirror(this, name, details);
+  }
+
+  // Nothing found.
+  return GetUndefinedMirror();
+};
+
+
+
+/**
+ * Try to find a property from its value.
+ * @param {Mirror} value The property value to look for
+ * @return {PropertyMirror} The property with the specified value. If no
+ *     property was found with the specified value UndefinedMirror is returned
+ */
+ObjectMirror.prototype.lookupProperty = function(value) {
+  var properties = this.properties();
+
+  // Look for property value in properties.
+  for (var i = 0; i < properties.length; i++) {
+
+    // Skip properties which are defined through accessors.
+    var property = properties[i];
+    if (property.propertyType() != PropertyType.Callbacks) {
+      if (%_ObjectEquals(property.value_, value.value_)) {
+        return property;
+      }
+    }
+  }
+
+  // Nothing found.
+  return GetUndefinedMirror();
+};
+
+
+/**
+ * Returns objects which have direct references to this object
+ * @param {number} opt_max_objects Optional parameter specifying the maximum
+ *     number of referencing objects to return.
+ * @return {Array} The objects which have direct references to this object.
+ */
+ObjectMirror.prototype.referencedBy = function(opt_max_objects) {
+  // Find all objects with direct references to this object.
+  var result = %DebugReferencedBy(this.value_,
+                                  Mirror.prototype, opt_max_objects || 0);
+
+  // Make mirrors for all the references found.
+  for (var i = 0; i < result.length; i++) {
+    result[i] = MakeMirror(result[i]);
+  }
+
+  return result;
+};
+
+
+ObjectMirror.prototype.toText = function() {
+  var name;
+  var ctor = this.constructorFunction();
+  if (!ctor.isFunction()) {
+    name = this.className();
+  } else {
+    name = ctor.name();
+    if (!name) {
+      name = this.className();
+    }
+  }
+  return '#<' + builtins.GetInstanceName(name) + '>';
+};
+
+
+/**
+ * Mirror object for functions.
+ * @param {function} value The function object reflected by this mirror.
+ * @constructor
+ * @extends ObjectMirror
+ */
+function FunctionMirror(value) {
+  ObjectMirror.call(this, value, FUNCTION_TYPE);
+  this.resolved_ = true;
+}
+inherits(FunctionMirror, ObjectMirror);
+
+
+/**
+ * Returns whether the function is resolved.
+ * @return {boolean} True if the function is resolved. Unresolved functions can
+ *     only originate as functions from stack frames
+ */
+FunctionMirror.prototype.resolved = function() {
+  return this.resolved_;
+};
+
+
+/**
+ * Returns the name of the function.
+ * @return {string} Name of the function
+ */
+FunctionMirror.prototype.name = function() {
+  return %FunctionGetName(this.value_);
+};
+
+
+/**
+ * Returns the inferred name of the function.
+ * @return {string} Name of the function
+ */
+FunctionMirror.prototype.inferredName = function() {
+  return %FunctionGetInferredName(this.value_);
+};
+
+
+/**
+ * Returns the source code for the function.
+ * @return {string or undefined} The source code for the function. If the
+ *     function is not resolved undefined will be returned.
+ */
+FunctionMirror.prototype.source = function() {
+  // Return source if function is resolved. Otherwise just fall through to
+  // return undefined.
+  if (this.resolved()) {
+    return builtins.FunctionSourceString(this.value_);
+  }
+};
+
+
+/**
+ * Returns the script object for the function.
+ * @return {ScriptMirror or undefined} Script object for the function or
+ *     undefined if the function has no script
+ */
+FunctionMirror.prototype.script = function() {
+  // Return script if function is resolved. Otherwise just fall through
+  // to return undefined.
+  if (this.resolved()) {
+    var script = %FunctionGetScript(this.value_);
+    if (script) {
+      return MakeMirror(script);
+    }
+  }
+};
+
+
+/**
+ * Returns the script source position for the function. Only makes sense
+ * for functions which have a script defined.
+ * @return {Number or undefined} in-script position for the function
+ */
+FunctionMirror.prototype.sourcePosition_ = function() {
+  // Return script if function is resolved. Otherwise just fall through
+  // to return undefined.
+  if (this.resolved()) {
+    return %FunctionGetScriptSourcePosition(this.value_);
+  }
+};
+
+
+/**
+ * Returns the script source location object for the function. Only makes sense
+ * for functions which have a script defined.
+ * @return {Location or undefined} in-script location for the function begin
+ */
+FunctionMirror.prototype.sourceLocation = function() {
+  if (this.resolved() && this.script()) {
+    return this.script().locationFromPosition(this.sourcePosition_(),
+                                              true);
+  }
+};
+
+
+/**
+ * Returns objects constructed by this function.
+ * @param {number} opt_max_instances Optional parameter specifying the maximum
+ *     number of instances to return.
+ * @return {Array or undefined} The objects constructed by this function.
+ */
+FunctionMirror.prototype.constructedBy = function(opt_max_instances) {
+  if (this.resolved()) {
+    // Find all objects constructed from this function.
+    var result = %DebugConstructedBy(this.value_, opt_max_instances || 0);
+
+    // Make mirrors for all the instances found.
+    for (var i = 0; i < result.length; i++) {
+      result[i] = MakeMirror(result[i]);
+    }
+
+    return result;
+  } else {
+    return [];
+  }
+};
+
+
+FunctionMirror.prototype.toText = function() {
+  return this.source();
+}
+
+
+/**
+ * Mirror object for unresolved functions.
+ * @param {string} value The name for the unresolved function reflected by this
+ *     mirror.
+ * @constructor
+ * @extends ObjectMirror
+ */
+function UnresolvedFunctionMirror(value) {
+  // Construct this using the ValueMirror as an unresolved function is not a
+  // real object but just a string.
+  ValueMirror.call(this, FUNCTION_TYPE, value);
+  this.propertyCount_ = 0;
+  this.elementCount_ = 0;
+  this.resolved_ = false;
+}
+inherits(UnresolvedFunctionMirror, FunctionMirror);
+
+
+UnresolvedFunctionMirror.prototype.className = function() {
+  return 'Function';
+};
+
+
+UnresolvedFunctionMirror.prototype.constructorFunction = function() {
+  return GetUndefinedMirror();
+};
+
+
+UnresolvedFunctionMirror.prototype.prototypeObject = function() {
+  return GetUndefinedMirror();
+};
+
+
+UnresolvedFunctionMirror.prototype.protoObject = function() {
+  return GetUndefinedMirror();
+};
+
+
+UnresolvedFunctionMirror.prototype.name = function() {
+  return this.value_;
+};
+
+
+UnresolvedFunctionMirror.prototype.inferredName = function() {
+  return undefined;
+};
+
+
+UnresolvedFunctionMirror.prototype.propertyNames = function(kind, limit) {
+  return [];
+}
+
+
+/**
+ * Mirror object for arrays.
+ * @param {Array} value The Array object reflected by this mirror
+ * @constructor
+ * @extends ObjectMirror
+ */
+function ArrayMirror(value) {
+  ObjectMirror.call(this, value);
+}
+inherits(ArrayMirror, ObjectMirror);
+
+
+ArrayMirror.prototype.length = function() {
+  return this.value_.length;
+};
+
+
+ArrayMirror.prototype.indexedPropertiesFromRange = function(opt_from_index, opt_to_index) {
+  var from_index = opt_from_index || 0;
+  var to_index = opt_to_index || this.length() - 1;
+  if (from_index > to_index) return new Array();
+  var values = new Array(to_index - from_index + 1);
+  for (var i = from_index; i <= to_index; i++) {
+    var details = %DebugGetPropertyDetails(this.value_, %ToString(i));
+    var value;
+    if (details) {
+      value = new PropertyMirror(this, i, details);
+    } else {
+      value = GetUndefinedMirror();
+    }
+    values[i - from_index] = value;
+  }
+  return values;
+}
+
+
+/**
+ * Mirror object for dates.
+ * @param {Date} value The Date object reflected by this mirror
+ * @constructor
+ * @extends ObjectMirror
+ */
+function DateMirror(value) {
+  ObjectMirror.call(this, value);
+}
+inherits(DateMirror, ObjectMirror);
+
+
+DateMirror.prototype.toText = function() {
+  var s = JSON.stringify(this.value_);
+  return s.substring(1, s.length - 1);  // cut quotes
+}
+
+
+/**
+ * Mirror object for regular expressions.
+ * @param {RegExp} value The RegExp object reflected by this mirror
+ * @constructor
+ * @extends ObjectMirror
+ */
+function RegExpMirror(value) {
+  ObjectMirror.call(this, value, REGEXP_TYPE);
+}
+inherits(RegExpMirror, ObjectMirror);
+
+
+/**
+ * Returns the source to the regular expression.
+ * @return {string or undefined} The source to the regular expression
+ */
+RegExpMirror.prototype.source = function() {
+  return this.value_.source;
+};
+
+
+/**
+ * Returns whether this regular expression has the global (g) flag set.
+ * @return {boolean} Value of the global flag
+ */
+RegExpMirror.prototype.global = function() {
+  return this.value_.global;
+};
+
+
+/**
+ * Returns whether this regular expression has the ignore case (i) flag set.
+ * @return {boolean} Value of the ignore case flag
+ */
+RegExpMirror.prototype.ignoreCase = function() {
+  return this.value_.ignoreCase;
+};
+
+
+/**
+ * Returns whether this regular expression has the multiline (m) flag set.
+ * @return {boolean} Value of the multiline flag
+ */
+RegExpMirror.prototype.multiline = function() {
+  return this.value_.multiline;
+};
+
+
+RegExpMirror.prototype.toText = function() {
+  // Simple text representation: /source/ (flags are not included).
+  return "/" + this.source() + "/";
+}
+
+
+/**
+ * Mirror object for error objects.
+ * @param {Error} value The error object reflected by this mirror
+ * @constructor
+ * @extends ObjectMirror
+ */
+function ErrorMirror(value) {
+  ObjectMirror.call(this, value, ERROR_TYPE);
+}
+inherits(ErrorMirror, ObjectMirror);
+
+
+/**
+ * Returns the message for this error object.
+ * @return {string or undefined} The message for this error object
+ */
+ErrorMirror.prototype.message = function() {
+  return this.value_.message;
+};
+
+
+ErrorMirror.prototype.toText = function() {
+  // Use the same text representation as in messages.js.
+  var str;
+  try {
+    str = builtins.ToDetailString(this.value_);
+  } catch (e) {
+    str = '#<an Error>';
+  }
+  return str;
+}
+
+
+/**
+ * Base mirror object for properties.
+ * @param {ObjectMirror} mirror The mirror object having this property
+ * @param {string} name The name of the property
+ * @param {Array} details Details about the property
+ * @constructor
+ * @extends Mirror
+ */
+function PropertyMirror(mirror, name, details) {
+  Mirror.call(this, PROPERTY_TYPE);
+  this.mirror_ = mirror;
+  this.name_ = name;
+  this.value_ = details[0];
+  this.details_ = details[1];
+  if (details.length > 2) {
+    this.exception_ = details[2]
+    this.getter_ = details[3];
+    this.setter_ = details[4];
+  }
+}
+inherits(PropertyMirror, Mirror);
+
+
+PropertyMirror.prototype.isReadOnly = function() {
+  return (this.attributes() & PropertyAttribute.ReadOnly) != 0;
+}
+
+
+PropertyMirror.prototype.isEnum = function() {
+  return (this.attributes() & PropertyAttribute.DontEnum) == 0;
+}
+
+
+PropertyMirror.prototype.canDelete = function() {
+  return (this.attributes() & PropertyAttribute.DontDelete) == 0;
+}
+
+
+PropertyMirror.prototype.name = function() {
+  return this.name_;
+}
+
+
+PropertyMirror.prototype.isIndexed = function() {
+  for (var i = 0; i < this.name_.length; i++) {
+    if (this.name_[i] < '0' || '9' < this.name_[i]) {
+      return false;
+    }
+  }
+  return true;
+}
+
+
+PropertyMirror.prototype.value = function() {
+  return MakeMirror(this.value_, false);
+}
+
+
+/**
+ * Returns whether this property value is an exception.
+ * @return {boolean} True if this property value is an exception
+ */
+PropertyMirror.prototype.isException = function() {
+  return this.exception_ ? true : false;
+}
+
+
+PropertyMirror.prototype.attributes = function() {
+  return %DebugPropertyAttributesFromDetails(this.details_);
+}
+
+
+PropertyMirror.prototype.propertyType = function() {
+  return %DebugPropertyTypeFromDetails(this.details_);
+}
+
+
+PropertyMirror.prototype.insertionIndex = function() {
+  return %DebugPropertyIndexFromDetails(this.details_);
+}
+
+
+/**
+ * Returns whether this property has a getter defined through __defineGetter__.
+ * @return {boolean} True if this property has a getter
+ */
+PropertyMirror.prototype.hasGetter = function() {
+  return this.getter_ ? true : false;
+}
+
+
+/**
+ * Returns whether this property has a setter defined through __defineSetter__.
+ * @return {boolean} True if this property has a setter
+ */
+PropertyMirror.prototype.hasSetter = function() {
+  return this.setter_ ? true : false;
+}
+
+
+/**
+ * Returns the getter for this property defined through __defineGetter__.
+ * @return {Mirror} FunctionMirror reflecting the getter function or
+ *     UndefinedMirror if there is no getter for this property
+ */
+PropertyMirror.prototype.getter = function() {
+  if (this.hasGetter()) {
+    return MakeMirror(this.getter_);
+  } else {
+    return GetUndefinedMirror();
+  }
+}
+
+
+/**
+ * Returns the setter for this property defined through __defineSetter__.
+ * @return {Mirror} FunctionMirror reflecting the setter function or
+ *     UndefinedMirror if there is no setter for this property
+ */
+PropertyMirror.prototype.setter = function() {
+  if (this.hasSetter()) {
+    return MakeMirror(this.setter_);
+  } else {
+    return GetUndefinedMirror();
+  }
+}
+
+
+/**
+ * Returns whether this property is natively implemented by the host or
+ * defined through JavaScript code.
+ * @return {boolean} True if the property is natively implemented by the host
+ */
+PropertyMirror.prototype.isNative = function() {
+  return (this.propertyType() == PropertyType.Interceptor) ||
+         ((this.propertyType() == PropertyType.Callbacks) &&
+          !this.hasGetter() && !this.hasSetter());
+}
+
+
+const kFrameDetailsFrameIdIndex = 0;
+const kFrameDetailsReceiverIndex = 1;
+const kFrameDetailsFunctionIndex = 2;
+const kFrameDetailsArgumentCountIndex = 3;
+const kFrameDetailsLocalCountIndex = 4;
+const kFrameDetailsSourcePositionIndex = 5;
+const kFrameDetailsConstructCallIndex = 6;
+const kFrameDetailsDebuggerFrameIndex = 7;
+const kFrameDetailsFirstDynamicIndex = 8;
+
+const kFrameDetailsNameIndex = 0;
+const kFrameDetailsValueIndex = 1;
+const kFrameDetailsNameValueSize = 2;
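+
+// Worked example (illustrative, not part of the original change): for a frame
+// with two arguments, the name of argument 1 is stored at index
+//   kFrameDetailsFirstDynamicIndex + 1 * kFrameDetailsNameValueSize +
+//   kFrameDetailsNameIndex = 8 + 2 + 0 = 10
+// and its value at index 11; the locals follow immediately after the
+// arguments.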
+
+/**
+ * Wrapper for the frame details information retrieved from the VM. The frame
+ * details from the VM are an array with the following content. See runtime.cc
+ * Runtime_GetFrameDetails.
+ *     0: Id
+ *     1: Receiver
+ *     2: Function
+ *     3: Argument count
+ *     4: Local count
+ *     5: Source position
+ *     6: Construct call
+ *     Arguments name, value
+ *     Locals name, value
+ * @param {number} break_id Current break id
+ * @param {number} index Frame number
+ * @constructor
+ */
+function FrameDetails(break_id, index) {
+  this.break_id_ = break_id;
+  this.details_ = %GetFrameDetails(break_id, index);
+}
+
+
+FrameDetails.prototype.frameId = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kFrameDetailsFrameIdIndex];
+}
+
+
+FrameDetails.prototype.receiver = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kFrameDetailsReceiverIndex];
+}
+
+
+FrameDetails.prototype.func = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kFrameDetailsFunctionIndex];
+}
+
+
+FrameDetails.prototype.isConstructCall = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kFrameDetailsConstructCallIndex];
+}
+
+
+FrameDetails.prototype.isDebuggerFrame = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kFrameDetailsDebuggerFrameIndex];
+}
+
+
+FrameDetails.prototype.argumentCount = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kFrameDetailsArgumentCountIndex];
+}
+
+
+FrameDetails.prototype.argumentName = function(index) {
+  %CheckExecutionState(this.break_id_);
+  if (index >= 0 && index < this.argumentCount()) {
+    return this.details_[kFrameDetailsFirstDynamicIndex +
+                         index * kFrameDetailsNameValueSize +
+                         kFrameDetailsNameIndex]
+  }
+}
+
+
+FrameDetails.prototype.argumentValue = function(index) {
+  %CheckExecutionState(this.break_id_);
+  if (index >= 0 && index < this.argumentCount()) {
+    return this.details_[kFrameDetailsFirstDynamicIndex +
+                         index * kFrameDetailsNameValueSize +
+                         kFrameDetailsValueIndex]
+  }
+}
+
+
+FrameDetails.prototype.localCount = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kFrameDetailsLocalCountIndex];
+}
+
+
+FrameDetails.prototype.sourcePosition = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kFrameDetailsSourcePositionIndex];
+}
+
+
+FrameDetails.prototype.localName = function(index) {
+  %CheckExecutionState(this.break_id_);
+  if (index >= 0 && index < this.localCount()) {
+    var locals_offset = kFrameDetailsFirstDynamicIndex + this.argumentCount() * kFrameDetailsNameValueSize
+    return this.details_[locals_offset +
+                         index * kFrameDetailsNameValueSize +
+                         kFrameDetailsNameIndex]
+  }
+}
+
+
+FrameDetails.prototype.localValue = function(index) {
+  %CheckExecutionState(this.break_id_);
+  if (index >= 0 && index < this.localCount()) {
+    var locals_offset = kFrameDetailsFirstDynamicIndex + this.argumentCount() * kFrameDetailsNameValueSize
+    return this.details_[locals_offset +
+                         index * kFrameDetailsNameValueSize +
+                         kFrameDetailsValueIndex]
+  }
+}
+
+
+FrameDetails.prototype.scopeCount = function() {
+  return %GetScopeCount(this.break_id_, this.frameId());
+}
+
+
+/**
+ * Mirror object for stack frames.
+ * @param {number} break_id The break id in the VM for which this frame is
+ *     valid
+ * @param {number} index The frame index (top frame is index 0)
+ * @constructor
+ * @extends Mirror
+ */
+function FrameMirror(break_id, index) {
+  Mirror.call(this, FRAME_TYPE);
+  this.break_id_ = break_id;
+  this.index_ = index;
+  this.details_ = new FrameDetails(break_id, index);
+}
+inherits(FrameMirror, Mirror);
+
+
+FrameMirror.prototype.index = function() {
+  return this.index_;
+};
+
+
+FrameMirror.prototype.func = function() {
+  // Get the function for this frame from the VM.
+  var f = this.details_.func();
+  
+  // Create a function mirror. NOTE: MakeMirror cannot be used here as the
+  // value returned from the VM might be a string if the function for the
+  // frame is unresolved.
+  if (IS_FUNCTION(f)) {
+    return MakeMirror(f);
+  } else {
+    return new UnresolvedFunctionMirror(f);
+  }
+};
+
+
+FrameMirror.prototype.receiver = function() {
+  return MakeMirror(this.details_.receiver());
+};
+
+
+FrameMirror.prototype.isConstructCall = function() {
+  return this.details_.isConstructCall();
+};
+
+
+FrameMirror.prototype.isDebuggerFrame = function() {
+  return this.details_.isDebuggerFrame();
+};
+
+
+FrameMirror.prototype.argumentCount = function() {
+  return this.details_.argumentCount();
+};
+
+
+FrameMirror.prototype.argumentName = function(index) {
+  return this.details_.argumentName(index);
+};
+
+
+FrameMirror.prototype.argumentValue = function(index) {
+  return MakeMirror(this.details_.argumentValue(index));
+};
+
+
+FrameMirror.prototype.localCount = function() {
+  return this.details_.localCount();
+};
+
+
+FrameMirror.prototype.localName = function(index) {
+  return this.details_.localName(index);
+};
+
+
+FrameMirror.prototype.localValue = function(index) {
+  return MakeMirror(this.details_.localValue(index));
+};
+
+
+FrameMirror.prototype.sourcePosition = function() {
+  return this.details_.sourcePosition();
+};
+
+
+FrameMirror.prototype.sourceLocation = function() {
+  if (this.func().resolved() && this.func().script()) {
+    return this.func().script().locationFromPosition(this.sourcePosition(),
+                                                     true);
+  }
+};
+
+
+FrameMirror.prototype.sourceLine = function() {
+  if (this.func().resolved()) {
+    var location = this.sourceLocation();
+    if (location) {
+      return location.line;
+    }
+  }
+};
+
+
+FrameMirror.prototype.sourceColumn = function() {
+  if (this.func().resolved()) {
+    var location = this.sourceLocation();
+    if (location) {
+      return location.column;
+    }
+  }
+};
+
+
+FrameMirror.prototype.sourceLineText = function() {
+  if (this.func().resolved()) {
+    var location = this.sourceLocation();
+    if (location) {
+      return location.sourceText();
+    }
+  }
+};
+
+
+FrameMirror.prototype.scopeCount = function() {
+  return this.details_.scopeCount();
+};
+
+
+FrameMirror.prototype.scope = function(index) {
+  return new ScopeMirror(this, index);
+};
+
+
+FrameMirror.prototype.evaluate = function(source, disable_break) {
+  var result = %DebugEvaluate(this.break_id_, this.details_.frameId(),
+                              source, Boolean(disable_break));
+  return MakeMirror(result);
+};
+
+
+FrameMirror.prototype.invocationText = function() {
+  // Format frame invocation (receiver, function and arguments).
+  var result = '';
+  var func = this.func();
+  var receiver = this.receiver();
+  if (this.isConstructCall()) {
+    // For constructor frames display new followed by the function name.
+    result += 'new ';
+    result += func.name() ? func.name() : '[anonymous]';
+  } else if (this.isDebuggerFrame()) {
+    result += '[debugger]';
+  } else {
+    // If the receiver has a className which is 'global' don't display it.
+    var display_receiver = !receiver.className || receiver.className() != 'global';
+    if (display_receiver) {
+      result += receiver.toText();
+    }
+    // Try to find the function as a property in the receiver. Include the
+    // prototype chain in the lookup.
+    var property = GetUndefinedMirror();
+    if (!receiver.isUndefined()) {
+      for (var r = receiver; !r.isNull() && property.isUndefined(); r = r.protoObject()) {
+        property = r.lookupProperty(func);
+      }
+    }
+    if (!property.isUndefined()) {
+      // The function invoked was found on the receiver. Use the property name
+      // for the backtrace.
+      if (!property.isIndexed()) {
+        if (display_receiver) {
+          result += '.';
+        }
+        result += property.name();
+      } else {
+        result += '[';
+        result += property.name();
+        result += ']';
+      }
+      // Also known as - if the name in the function doesn't match the name
+      // under which it was looked up.
+      if (func.name() && func.name() != property.name()) {
+        result += '(aka ' + func.name() + ')';
+      }
+    } else {
+      // The function invoked was not found on the receiver. Use the function
+      // name if available for the backtrace.
+      if (display_receiver) {
+        result += '.';
+      }
+      result += func.name() ? func.name() : '[anonymous]';
+    }
+  }
+
+  // Render arguments for normal frames.
+  if (!this.isDebuggerFrame()) {
+    result += '(';
+    for (var i = 0; i < this.argumentCount(); i++) {
+      if (i != 0) result += ', ';
+      if (this.argumentName(i)) {
+        result += this.argumentName(i);
+        result += '=';
+      }
+      result += this.argumentValue(i).toText();
+    }
+    result += ')';
+  }
+
+  return result;
+}
+
+
+FrameMirror.prototype.sourceAndPositionText = function() {
+  // Format source and position.
+  var result = '';
+  var func = this.func();
+  if (func.resolved()) {
+    if (func.script()) {
+      if (func.script().name()) {
+        result += func.script().name();
+      } else {
+        result += '[unnamed]';
+      }
+      if (!this.isDebuggerFrame()) {
+        var location = this.sourceLocation();
+        result += ' line ';
+        result += !IS_UNDEFINED(location) ? (location.line + 1) : '?';
+        result += ' column ';
+        result += !IS_UNDEFINED(location) ? (location.column + 1) : '?';
+        if (!IS_UNDEFINED(this.sourcePosition())) {
+          result += ' (position ' + (this.sourcePosition() + 1) + ')';
+        }
+      }
+    } else {
+      result += '[no source]';
+    }
+  } else {
+    result += '[unresolved]';
+  }
+
+  return result;
+}
+
+
+FrameMirror.prototype.localsText = function() {
+  // Format local variables.
+  var result = '';
+  var locals_count = this.localCount()
+  if (locals_count > 0) {
+    for (var i = 0; i < locals_count; ++i) {
+      result += '      var ';
+      result += this.localName(i);
+      result += ' = ';
+      result += this.localValue(i).toText();
+      if (i < locals_count - 1) result += '\n';
+    }
+  }
+
+  return result;
+}
+
+
+FrameMirror.prototype.toText = function(opt_locals) {
+  var result = '';
+  result += '#' + (this.index() <= 9 ? '0' : '') + this.index();
+  result += ' ';
+  result += this.invocationText();
+  result += ' ';
+  result += this.sourceAndPositionText();
+  if (opt_locals) {
+    result += '\n';
+    result += this.localsText();
+  }
+  return result;
+}
+
+
+const kScopeDetailsTypeIndex = 0;
+const kScopeDetailsObjectIndex = 1;
+
+function ScopeDetails(frame, index) {
+  this.break_id_ = frame.break_id_;
+  this.details_ = %GetScopeDetails(frame.break_id_,
+                                   frame.details_.frameId(),
+                                   index);
+}
+
+
+ScopeDetails.prototype.type = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kScopeDetailsTypeIndex];
+}
+
+
+ScopeDetails.prototype.object = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kScopeDetailsObjectIndex];
+}
+
+
+/**
+ * Mirror object for scope.
+ * @param {FrameMirror} frame The frame this scope is a part of
+ * @param {number} index The scope index in the frame
+ * @constructor
+ * @extends Mirror
+ */
+function ScopeMirror(frame, index) {
+  Mirror.call(this, SCOPE_TYPE);
+  this.frame_index_ = frame.index_;
+  this.scope_index_ = index;
+  this.details_ = new ScopeDetails(frame, index);
+}
+inherits(ScopeMirror, Mirror);
+
+
+ScopeMirror.prototype.frameIndex = function() {
+  return this.frame_index_;
+};
+
+
+ScopeMirror.prototype.scopeIndex = function() {
+  return this.scope_index_;
+};
+
+
+ScopeMirror.prototype.scopeType = function() {
+  return this.details_.type();
+};
+
+
+ScopeMirror.prototype.scopeObject = function() {
+  // For local and closure scopes create a transient mirror, as these objects
+  // are created on the fly by materializing the local or closure scope and
+  // therefore do not preserve identity.
+  var transient = this.scopeType() == ScopeType.Local ||
+                  this.scopeType() == ScopeType.Closure;
+  return MakeMirror(this.details_.object(), transient);
+};
+
+
+/**
+ * Mirror object for script source.
+ * @param {Script} script The script object
+ * @constructor
+ * @extends Mirror
+ */
+function ScriptMirror(script) {
+  Mirror.call(this, SCRIPT_TYPE);
+  this.script_ = script;
+  this.context_ = new ContextMirror(script.context_data);
+  this.allocateHandle_();
+}
+inherits(ScriptMirror, Mirror);
+
+
+ScriptMirror.prototype.value = function() {
+  return this.script_;
+};
+
+
+ScriptMirror.prototype.name = function() {
+  return this.script_.name;
+};
+
+
+ScriptMirror.prototype.id = function() {
+  return this.script_.id;
+};
+
+
+ScriptMirror.prototype.source = function() {
+  return this.script_.source;
+};
+
+
+ScriptMirror.prototype.lineOffset = function() {
+  return this.script_.line_offset;
+};
+
+
+ScriptMirror.prototype.columnOffset = function() {
+  return this.script_.column_offset;
+};
+
+
+ScriptMirror.prototype.data = function() {
+  return this.script_.data;
+};
+
+
+ScriptMirror.prototype.scriptType = function() {
+  return this.script_.type;
+};
+
+
+ScriptMirror.prototype.compilationType = function() {
+  return this.script_.compilation_type;
+};
+
+
+ScriptMirror.prototype.lineCount = function() {
+  return this.script_.lineCount();
+};
+
+
+ScriptMirror.prototype.locationFromPosition = function(
+    position, include_resource_offset) {
+  return this.script_.locationFromPosition(position, include_resource_offset);
+}
+
+
+ScriptMirror.prototype.sourceSlice = function (opt_from_line, opt_to_line) {
+  return this.script_.sourceSlice(opt_from_line, opt_to_line);
+}
+
+
+ScriptMirror.prototype.context = function() {
+  return this.context_;
+};
+
+
+ScriptMirror.prototype.evalFromScript = function() {
+  return MakeMirror(this.script_.eval_from_script);
+};
+
+
+ScriptMirror.prototype.evalFromFunctionName = function() {
+  return MakeMirror(this.script_.eval_from_function_name);
+};
+
+
+ScriptMirror.prototype.evalFromLocation = function() {
+  var eval_from_script = this.evalFromScript();
+  if (!eval_from_script.isUndefined()) {
+    var position = this.script_.eval_from_script_position;
+    return eval_from_script.locationFromPosition(position, true);
+  }
+};
+
+
+ScriptMirror.prototype.toText = function() {
+  var result = '';
+  result += this.name();
+  result += ' (lines: ';
+  if (this.lineOffset() > 0) {
+    result += this.lineOffset();
+    result += '-';
+    result += this.lineOffset() + this.lineCount() - 1;
+  } else {
+    result += this.lineCount();
+  }
+  result += ')';
+  return result;
+}
+
+
+/**
+ * Mirror object for context.
+ * @param {Object} data The context data
+ * @constructor
+ * @extends Mirror
+ */
+function ContextMirror(data) {
+  Mirror.call(this, CONTEXT_TYPE);
+  this.data_ = data;
+  this.allocateHandle_();
+}
+inherits(ContextMirror, Mirror);
+
+
+ContextMirror.prototype.data = function() {
+  return this.data_;
+};
+
+
+/**
+ * Returns a mirror serializer
+ *
+ * @param {boolean} details Set to true to include details
+ * @param {Object} options Options controlling the serialization
+ *     The following options can be set:
+ *       includeSource: include the full source of scripts
+ * @returns {MirrorSerializer} mirror serializer
+ */
+function MakeMirrorSerializer(details, options) {
+  return new JSONProtocolSerializer(details, options);
+}
+
+
+/**
+ * Object for serializing a mirror object and its direct references.
+ * @param {boolean} details Indicates whether to include details for the mirror
+ *     serialized
+ * @constructor
+ */
+function JSONProtocolSerializer(details, options) {
+  this.details_ = details;
+  this.options_ = options;
+  this.mirrors_ = [ ];
+}
+
+
+/**
+ * Returns a serialization of an object reference. The referenced object is
+ * added to the serialization state.
+ *
+ * @param {Mirror} mirror The mirror to serialize
+ * @returns {String} JSON serialization
+ */
+JSONProtocolSerializer.prototype.serializeReference = function(mirror) {
+  return this.serialize_(mirror, true, true);
+}
+
+
+/**
+ * Returns a serialization of an object value. The referenced objects are
+ * added to the serialization state.
+ *
+ * @param {Mirror} mirror The mirror to serialize
+ * @returns {String} JSON serialization
+ */
+JSONProtocolSerializer.prototype.serializeValue = function(mirror) {
+  var json = this.serialize_(mirror, false, true);
+  return json;
+}
+
+
+/**
+ * Returns a serialization of all the objects referenced.
+ *
+ * @returns {Array.<Object>} Array of the referenced objects converted to
+ *     protocol objects.
+ */
+JSONProtocolSerializer.prototype.serializeReferencedObjects = function() {
+  // Collect the protocol representation of the referenced objects in an array.
+  var content = [];
+  
+  // Get the number of referenced objects.
+  var count = this.mirrors_.length;
+  
+  for (var i = 0; i < count; i++) {
+    content.push(this.serialize_(this.mirrors_[i], false, false));
+  }
+
+  return content;
+}
+
+
+JSONProtocolSerializer.prototype.includeSource_ = function() {
+  return this.options_ && this.options_.includeSource;
+}
+
+
+JSONProtocolSerializer.prototype.inlineRefs_ = function() {
+  return this.options_ && this.options_.inlineRefs;
+}
+
+
+JSONProtocolSerializer.prototype.add_ = function(mirror) {
+  // If this mirror is already in the list just return.
+  for (var i = 0; i < this.mirrors_.length; i++) {
+    if (this.mirrors_[i] === mirror) {
+      return;
+    }
+  }
+  
+  // Add the mirror to the list of mirrors to be serialized.
+  this.mirrors_.push(mirror);
+}
+
+
+/**
+ * Formats mirror object to protocol reference object with some data that can
+ * be used to display the value in debugger.
+ * @param {Mirror} mirror Mirror to serialize.
+ * @return {Object} Protocol reference object.
+ */
+JSONProtocolSerializer.prototype.serializeReferenceWithDisplayData_ = 
+    function(mirror) {
+  var o = {};
+  o.ref = mirror.handle();
+  o.type = mirror.type();
+  switch (mirror.type()) {
+    case UNDEFINED_TYPE:
+    case NULL_TYPE:
+    case BOOLEAN_TYPE:
+    case NUMBER_TYPE:
+      o.value = mirror.value();
+      break;
+    case STRING_TYPE:
+      // Limit string length.
+      o.value = mirror.toText();
+      break;
+    case FUNCTION_TYPE:
+      o.name = mirror.name();
+      o.inferredName = mirror.inferredName();
+      if (mirror.script()) {
+        o.scriptId = mirror.script().id();
+      }
+      break;
+    case ERROR_TYPE:
+    case REGEXP_TYPE:
+      o.value = mirror.toText();
+      break;
+    case OBJECT_TYPE:
+      o.className = mirror.className();
+      break;
+  }
+  return o;
+};
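+
+// Illustrative examples (not part of the original change) of the inline
+// reference objects produced above; the handle values are hypothetical:
+//
+//   {ref: 12, type: 'number', value: 42}
+//   {ref: 13, type: 'object', className: 'Array'}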
+
+
+JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
+                                                       details) {
+  // If serializing a reference to a mirror just return the reference and add
+  // the mirror to the referenced mirrors.
+  if (reference &&
+      (mirror.isValue() || mirror.isScript() || mirror.isContext())) {
+    if (this.inlineRefs_() && mirror.isValue()) {
+      return this.serializeReferenceWithDisplayData_(mirror);
+    } else {
+      this.add_(mirror);
+      return {'ref' : mirror.handle()};
+    }
+  }
+  
+  // Collect the JSON property/value pairs.
+  var content = {};
+
+  // Add the mirror handle.
+  if (mirror.isValue() || mirror.isScript() || mirror.isContext()) {
+    content.handle = mirror.handle();
+  }
+
+  // Always add the type.
+  content.type = mirror.type();
+
+  switch (mirror.type()) {
+    case UNDEFINED_TYPE:
+    case NULL_TYPE:
+      // Undefined and null are represented just by their type.
+      break;
+
+    case BOOLEAN_TYPE:
+      // Boolean values are simply represented by their value.
+      content.value = mirror.value();
+      break;
+
+    case NUMBER_TYPE:
+      // Number values are simply represented by their value.
+      content.value = NumberToJSON_(mirror.value());
+      break;
+
+    case STRING_TYPE:
+      // String values might have their value cropped to keep down size.
+      if (mirror.length() > kMaxProtocolStringLength) {
+        var str = mirror.value().substring(0, kMaxProtocolStringLength);
+        content.value = str;
+        content.fromIndex = 0;
+        content.toIndex = kMaxProtocolStringLength;
+      } else {
+        content.value = mirror.value();
+      }
+      content.length = mirror.length();
+      break;
+
+    case OBJECT_TYPE:
+    case FUNCTION_TYPE:
+    case ERROR_TYPE:
+    case REGEXP_TYPE:
+      // Add object representation.
+      this.serializeObject_(mirror, content, details);
+      break;
+
+    case PROPERTY_TYPE:
+      throw new Error('PropertyMirror cannot be serialized independently');
+      break;
+
+    case FRAME_TYPE:
+      // Add object representation.
+      this.serializeFrame_(mirror, content);
+      break;
+
+    case SCOPE_TYPE:
+      // Add object representation.
+      this.serializeScope_(mirror, content);
+      break;
+
+    case SCRIPT_TYPE:
+      // Script is represented by id, name and source attributes.
+      if (mirror.name()) {
+        content.name = mirror.name();
+      }
+      content.id = mirror.id();
+      content.lineOffset = mirror.lineOffset();
+      content.columnOffset = mirror.columnOffset();
+      content.lineCount = mirror.lineCount();
+      if (mirror.data()) {
+        content.data = mirror.data();
+      }
+      if (this.includeSource_()) {
+        content.source = mirror.source();
+      } else {
+        var sourceStart = mirror.source().substring(0, 80);
+        content.sourceStart = sourceStart;
+      }
+      content.sourceLength = mirror.source().length;
+      content.scriptType = mirror.scriptType();
+      content.compilationType = mirror.compilationType();
+      // For compilation type eval emit information on the script from which
+      // eval was called if a script is present.
+      if (mirror.compilationType() == 1 &&
+          mirror.evalFromScript()) {
+        content.evalFromScript =
+            this.serializeReference(mirror.evalFromScript());
+        var evalFromLocation = mirror.evalFromLocation()
+        if (evalFromLocation) {
+          content.evalFromLocation = { line: evalFromLocation.line,
+                                       column: evalFromLocation.column };
+        }
+        if (mirror.evalFromFunctionName()) {
+          content.evalFromFunctionName = mirror.evalFromFunctionName();
+        }
+      }
+      if (mirror.context()) {
+        content.context = this.serializeReference(mirror.context());
+      }
+      break;
+
+    case CONTEXT_TYPE:
+      content.data = mirror.data();
+      break;
+  }
+
+  // Always add the text representation.
+  content.text = mirror.toText();
+  
+  // Create and return the JSON string.
+  return content;
+}
+
+
+/**
+ * Serialize object information to the following JSON format.
+ *
+ *   {"className":"<class name>",
+ *    "constructorFunction":{"ref":<number>},
+ *    "protoObject":{"ref":<number>},
+ *    "prototypeObject":{"ref":<number>},
+ *    "namedInterceptor":<boolean>,
+ *    "indexedInterceptor":<boolean>,
+ *    "properties":[<properties>]}
+ */
+JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
+                                                             details) {
+  // Add general object properties.
+  content.className = mirror.className();
+  content.constructorFunction =
+      this.serializeReference(mirror.constructorFunction());
+  content.protoObject = this.serializeReference(mirror.protoObject());
+  content.prototypeObject = this.serializeReference(mirror.prototypeObject());
+
+  // Add flags to indicate whether there are interceptors.
+  if (mirror.hasNamedInterceptor()) {
+    content.namedInterceptor = true;
+  }
+  if (mirror.hasIndexedInterceptor()) {
+    content.indexedInterceptor = true;
+  }
+  
+  // Add function specific properties.
+  if (mirror.isFunction()) {
+    // Add function specific properties.
+    content.name = mirror.name();
+    if (!IS_UNDEFINED(mirror.inferredName())) {
+      content.inferredName = mirror.inferredName();
+    }
+    content.resolved = mirror.resolved();
+    if (mirror.resolved()) {
+      content.source = mirror.source();
+    }
+    if (mirror.script()) {
+      content.script = this.serializeReference(mirror.script());
+      content.scriptId = mirror.script().id();
+      
+      serializeLocationFields(mirror.sourceLocation(), content);
+    }
+  }
+
+  // Add date specific properties.
+  if (mirror.isDate()) {
+    // Add date specific properties.
+    content.value = mirror.value();
+  }
+
+  // Add actual properties - named properties followed by indexed properties.
+  var propertyNames = mirror.propertyNames(PropertyKind.Named);
+  var propertyIndexes = mirror.propertyNames(PropertyKind.Indexed);
+  var p = new Array(propertyNames.length + propertyIndexes.length);
+  for (var i = 0; i < propertyNames.length; i++) {
+    var propertyMirror = mirror.property(propertyNames[i]);
+    p[i] = this.serializeProperty_(propertyMirror);
+    if (details) {
+      this.add_(propertyMirror.value());
+    }
+  }
+  for (var i = 0; i < propertyIndexes.length; i++) {
+    var propertyMirror = mirror.property(propertyIndexes[i]);
+    p[propertyNames.length + i] = this.serializeProperty_(propertyMirror);
+    if (details) {
+      this.add_(propertyMirror.value());
+    }
+  }
+  content.properties = p;
+}
+
+
+/**
+ * Serialize location information to the following JSON format:
+ *
+ *   "position":"<position>",
+ *   "line":"<line>",
+ *   "column":"<column>",
+ * 
+ * @param {SourceLocation} location The location to serialize, may be undefined.
+ */
+function serializeLocationFields (location, content) {
+  if (!location) {
+    return;
+  }
+  content.position = location.position;
+  var line = location.line;
+  if (!IS_UNDEFINED(line)) {
+    content.line = line;
+  }
+  var column = location.column;
+  if (!IS_UNDEFINED(column)) {
+    content.column = column;
+  }
+}
+
+
+/**
+ * Serialize property information to the following JSON format for building the
+ * array of properties.
+ *
+ *   {"name":"<property name>",
+ *    "attributes":<number>,
+ *    "propertyType":<number>,
+ *    "ref":<number>}
+ *
+ * If the attribute for the property is PropertyAttribute.None it is not added.
+ * If the propertyType for the property is PropertyType.Normal it is not added.
+ * Here are a couple of examples.
+ *
+ *   {"name":"hello","ref":1}
+ *   {"name":"length","attributes":7,"propertyType":3,"ref":2}
+ *
+ * @param {PropertyMirror} propertyMirror The property to serialize.
+ * @returns {Object} Protocol object representing the property.
+ */
+JSONProtocolSerializer.prototype.serializeProperty_ = function(propertyMirror) {
+  var result = {};
+  
+  result.name = propertyMirror.name();
+  var propertyValue = propertyMirror.value();
+  if (this.inlineRefs_() && propertyValue.isValue()) {
+    result.value = this.serializeReferenceWithDisplayData_(propertyValue);
+  } else {
+    if (propertyMirror.attributes() != PropertyAttribute.None) {
+      result.attributes = propertyMirror.attributes();
+    }
+    if (propertyMirror.propertyType() != PropertyType.Normal) {
+      result.propertyType = propertyMirror.propertyType();
+    }
+    result.ref = propertyValue.handle();
+  }
+  return result;
+}
+
+
+JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
+  content.index = mirror.index();
+  content.receiver = this.serializeReference(mirror.receiver());
+  var func = mirror.func();
+  content.func = this.serializeReference(func);
+  if (func.script()) {
+    content.script = this.serializeReference(func.script());
+  }
+  content.constructCall = mirror.isConstructCall();
+  content.debuggerFrame = mirror.isDebuggerFrame();
+  var x = new Array(mirror.argumentCount());
+  for (var i = 0; i < mirror.argumentCount(); i++) {
+    var arg = {};
+    var argument_name = mirror.argumentName(i)
+    if (argument_name) {
+      arg.name = argument_name;
+    }
+    arg.value = this.serializeReference(mirror.argumentValue(i));
+    x[i] = arg;
+  }
+  content.arguments = x;
+  var x = new Array(mirror.localCount());
+  for (var i = 0; i < mirror.localCount(); i++) {
+    var local = {};
+    local.name = mirror.localName(i);
+    local.value = this.serializeReference(mirror.localValue(i));
+    x[i] = local;
+  }
+  content.locals = x;
+  serializeLocationFields(mirror.sourceLocation(), content);
+  var source_line_text = mirror.sourceLineText();
+  if (!IS_UNDEFINED(source_line_text)) {
+    content.sourceLineText = source_line_text;
+  }
+  
+  content.scopes = [];
+  for (var i = 0; i < mirror.scopeCount(); i++) {
+    var scope = mirror.scope(i);
+    content.scopes.push({
+      type: scope.scopeType(),
+      index: i
+    });
+  }
+}
+
+
+JSONProtocolSerializer.prototype.serializeScope_ = function(mirror, content) {
+  content.index = mirror.scopeIndex();
+  content.frameIndex = mirror.frameIndex();
+  content.type = mirror.scopeType();
+  content.object = this.inlineRefs_() ?
+                   this.serializeValue(mirror.scopeObject()) :
+                   this.serializeReference(mirror.scopeObject());
+}
+
+
+/**
+ * Convert a number to a protocol value. For all finite numbers the number
+ * itself is returned. For the non-finite values NaN, Infinity and
+ * -Infinity the string representation "NaN", "Infinity" or "-Infinity"
+ * (not including the quotes) is returned.
+ *
+ * @param {number} value The number value to convert to a protocol value.
+ * @returns {number|string} Protocol value.
+ */
+function NumberToJSON_(value) {
+  if (isNaN(value)) {
+    return 'NaN';
+  }
+  if (!isFinite(value)) {
+    if (value > 0) {
+      return 'Infinity';
+    } else {
+      return '-Infinity';
+    }
+  }
+  return value; 
+}
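+
+// Illustrative examples (not part of the original change):
+//   NumberToJSON_(42);      // 42
+//   NumberToJSON_(0 / 0);   // 'NaN'
+//   NumberToJSON_(1 / 0);   // 'Infinity'
+//   NumberToJSON_(-1 / 0);  // '-Infinity'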
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index 6457ae7..37cf263 100644
--- a/src/mksnapshot.cc
+++ b/src/mksnapshot.cc
@@ -35,6 +35,7 @@
 #include "natives.h"
 #include "platform.h"
 #include "serialize.h"
+#include "list.h"
 
 // use explicit namespace to avoid clashing with types in namespace v8
 namespace i = v8::internal;
@@ -96,7 +97,9 @@
 
 class CppByteSink : public i::SnapshotByteSink {
  public:
-  explicit CppByteSink(const char* snapshot_file) : bytes_written_(0) {
+  explicit CppByteSink(const char* snapshot_file)
+      : bytes_written_(0),
+        partial_sink_(this) {
     fp_ = i::OS::FOpen(snapshot_file, "wb");
     if (fp_ == NULL) {
       i::PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
@@ -111,11 +114,53 @@
   }
 
   virtual ~CppByteSink() {
-    if (fp_ != NULL) {
-      fprintf(fp_, "};\n\n");
-      fprintf(fp_, "int Snapshot::size_ = %d;\n\n", bytes_written_);
-      fprintf(fp_, "} }  // namespace v8::internal\n");
-      fclose(fp_);
+    fprintf(fp_, "const int Snapshot::size_ = %d;\n\n", bytes_written_);
+    fprintf(fp_, "} }  // namespace v8::internal\n");
+    fclose(fp_);
+  }
+
+  void WriteSpaceUsed(
+      int new_space_used,
+      int pointer_space_used,
+      int data_space_used,
+      int code_space_used,
+      int map_space_used,
+      int cell_space_used,
+      int large_space_used) {
+    fprintf(fp_, "};\n\n");
+    fprintf(fp_, "const int Snapshot::new_space_used_ = %d;\n", new_space_used);
+    fprintf(fp_,
+            "const int Snapshot::pointer_space_used_ = %d;\n",
+            pointer_space_used);
+    fprintf(fp_,
+            "const int Snapshot::data_space_used_ = %d;\n",
+            data_space_used);
+    fprintf(fp_,
+            "const int Snapshot::code_space_used_ = %d;\n",
+            code_space_used);
+    fprintf(fp_, "const int Snapshot::map_space_used_ = %d;\n", map_space_used);
+    fprintf(fp_,
+            "const int Snapshot::cell_space_used_ = %d;\n",
+            cell_space_used);
+    fprintf(fp_,
+            "const int Snapshot::large_space_used_ = %d;\n",
+            large_space_used);
+  }
+
+  void WritePartialSnapshot() {
+    int length = partial_sink_.Position();
+    fprintf(fp_, "};\n\n");
+    fprintf(fp_, "const int Snapshot::context_size_ = %d;\n",  length);
+    fprintf(fp_, "const byte Snapshot::context_data_[] = {\n");
+    for (int j = 0; j < length; j++) {
+      if ((j & 0x1f) == 0x1f) {
+        fprintf(fp_, "\n");
+      }
+      char byte = partial_sink_.at(j);
+      if (j != 0) {
+        fprintf(fp_, ",");
+      }
+      fprintf(fp_, "%d", byte);
     }
   }
 
@@ -125,7 +170,7 @@
     }
     fprintf(fp_, "%d", byte);
     bytes_written_++;
-    if ((bytes_written_ & 0x3f) == 0) {
+    if ((bytes_written_ & 0x1f) == 0) {
       fprintf(fp_, "\n");
     }
   }
@@ -134,13 +179,33 @@
     return bytes_written_;
   }
 
+  i::SnapshotByteSink* partial_sink() { return &partial_sink_; }
+
+  class PartialSnapshotSink : public i::SnapshotByteSink {
+   public:
+    explicit PartialSnapshotSink(CppByteSink* parent)
+        : parent_(parent),
+          data_() { }
+    virtual ~PartialSnapshotSink() { data_.Free(); }
+    virtual void Put(int byte, const char* description) {
+      data_.Add(byte);
+    }
+    virtual int Position() { return data_.length(); }
+    char at(int i) { return data_[i]; }
+   private:
+    CppByteSink* parent_;
+    i::List<char> data_;
+  };
+
  private:
   FILE* fp_;
   int bytes_written_;
+  PartialSnapshotSink partial_sink_;
 };
 
 
 int main(int argc, char** argv) {
+
 #ifdef ENABLE_LOGGING_AND_PROFILING
   // By default, log code create information in the snapshot.
   i::FLAG_log_code = true;
@@ -162,12 +227,31 @@
       i::Bootstrapper::NativesSourceLookup(i);
     }
   }
+  // If we don't do this then we end up with a stray root pointing at the
+  // context even after we have disposed of the context.
+  i::Heap::CollectAllGarbage(true);
+  i::Object* raw_context = *(v8::Utils::OpenHandle(*context));
   context.Dispose();
   CppByteSink sink(argv[1]);
   // This results in a somewhat smaller snapshot, probably because it gets rid
   // of some things that are cached between garbage collections.
-  i::Heap::CollectAllGarbage(true);
   i::StartupSerializer ser(&sink);
-  ser.Serialize();
+  ser.SerializeStrongReferences();
+
+  i::PartialSerializer partial_ser(&ser, sink.partial_sink());
+  partial_ser.Serialize(&raw_context);
+
+  ser.SerializeWeakReferences();
+
+  sink.WritePartialSnapshot();
+
+  sink.WriteSpaceUsed(
+      partial_ser.CurrentAllocationAddress(i::NEW_SPACE),
+      partial_ser.CurrentAllocationAddress(i::OLD_POINTER_SPACE),
+      partial_ser.CurrentAllocationAddress(i::OLD_DATA_SPACE),
+      partial_ser.CurrentAllocationAddress(i::CODE_SPACE),
+      partial_ser.CurrentAllocationAddress(i::MAP_SPACE),
+      partial_ser.CurrentAllocationAddress(i::CELL_SPACE),
+      partial_ser.CurrentAllocationAddress(i::LO_SPACE));
   return 0;
 }
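
The mksnapshot changes split serialization into three passes: SerializeStrongReferences writes the startup roots, PartialSerializer::Serialize writes the context into a separate byte stream, and SerializeWeakReferences finishes the startup snapshot. The generated snapshot source file then carries both byte arrays plus the per-space allocation counts that Heap::ReserveSpace needs before a context can be deserialized. Both arrays are emitted as comma-separated decimal byte values with a line break every 32 entries, the same formatting Put uses. A stand-alone sketch of that formatting, in JavaScript purely for illustration (mksnapshot itself does this in C++):

    // Mirrors the emission loop in WritePartialSnapshot/Put: decimal bytes,
    // comma-separated, with a line break inserted every 32 entries.
    function formatSnapshotBytes(bytes) {
      var out = '';
      for (var j = 0; j < bytes.length; j++) {
        if ((j & 0x1f) == 0x1f) out += '\n';
        if (j != 0) out += ',';
        out += bytes[j];
      }
      return out;
    }
    // formatSnapshotBytes([7, 0, 255]) -> "7,0,255"
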
diff --git a/src/natives.h b/src/natives.h
index fdfd213..639a2d3 100644
--- a/src/natives.h
+++ b/src/natives.h
@@ -44,13 +44,13 @@
  public:
   // Number of built-in scripts.
   static int GetBuiltinsCount();
-  // Number of delayed/lazy loading scripts.
-  static int GetDelayCount();
+  // Number of debugger implementation scripts.
+  static int GetDebuggerCount();
 
-  // These are used to access built-in scripts.
-  // The delayed script has an index in the interval [0, GetDelayCount()).
-  // The non-delayed script has an index in the interval
-  // [GetDelayCount(), GetNativesCount()).
+  // These are used to access built-in scripts.  The debugger implementation
+  // scripts have an index in the interval [0, GetDebuggerCount()).  The
+  // non-debugger scripts have an index in the interval [GetDebuggerCount(),
+  // GetNativesCount()).
   static int GetIndex(const char* name);
   static Vector<const char> GetScriptSource(int index);
   static Vector<const char> GetScriptName(int index);
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 7e77e81..ded213b 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -615,9 +615,6 @@
   if (is_undetectable()) {
     PrintF(" - undetectable\n");
   }
-  if (needs_loading()) {
-    PrintF(" - needs_loading\n");
-  }
   if (has_instance_call_handler()) {
     PrintF(" - instance_call_handler\n");
   }
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 4355fe9..cc971f3 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -2448,11 +2448,6 @@
 }
 
 
-bool JSObject::IsLoaded() {
-  return !map()->needs_loading();
-}
-
-
 Code* JSFunction::code() {
   return shared()->code();
 }
diff --git a/src/objects.cc b/src/objects.cc
index 6dd1d49..a8328ac 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -339,55 +339,6 @@
 }
 
 
-Object* JSObject::GetLazyProperty(Object* receiver,
-                                  LookupResult* result,
-                                  String* name,
-                                  PropertyAttributes* attributes) {
-  HandleScope scope;
-  Handle<Object> this_handle(this);
-  Handle<Object> receiver_handle(receiver);
-  Handle<String> name_handle(name);
-  bool pending_exception;
-  LoadLazy(Handle<JSObject>(JSObject::cast(result->GetLazyValue())),
-           &pending_exception);
-  if (pending_exception) return Failure::Exception();
-  return this_handle->GetPropertyWithReceiver(*receiver_handle,
-                                              *name_handle,
-                                              attributes);
-}
-
-
-Object* JSObject::SetLazyProperty(LookupResult* result,
-                                  String* name,
-                                  Object* value,
-                                  PropertyAttributes attributes) {
-  ASSERT(!IsJSGlobalProxy());
-  HandleScope scope;
-  Handle<JSObject> this_handle(this);
-  Handle<String> name_handle(name);
-  Handle<Object> value_handle(value);
-  bool pending_exception;
-  LoadLazy(Handle<JSObject>(JSObject::cast(result->GetLazyValue())),
-           &pending_exception);
-  if (pending_exception) return Failure::Exception();
-  return this_handle->SetProperty(*name_handle, *value_handle, attributes);
-}
-
-
-Object* JSObject::DeleteLazyProperty(LookupResult* result,
-                                     String* name,
-                                     DeleteMode mode) {
-  HandleScope scope;
-  Handle<JSObject> this_handle(this);
-  Handle<String> name_handle(name);
-  bool pending_exception;
-  LoadLazy(Handle<JSObject>(JSObject::cast(result->GetLazyValue())),
-           &pending_exception);
-  if (pending_exception) return Failure::Exception();
-  return this_handle->DeleteProperty(*name_handle, mode);
-}
-
-
 Object* JSObject::GetNormalizedProperty(LookupResult* result) {
   ASSERT(!HasFastProperties());
   Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
@@ -531,12 +482,6 @@
     return Heap::undefined_value();
   }
   *attributes = result->GetAttributes();
-  if (!result->IsLoaded()) {
-    return JSObject::cast(this)->GetLazyProperty(receiver,
-                                                 result,
-                                                 name,
-                                                 attributes);
-  }
   Object* value;
   JSObject* holder = result->holder();
   switch (result->type()) {
@@ -1779,7 +1724,6 @@
           return;
         }
         value = JSGlobalPropertyCell::cast(value)->value();
-        ASSERT(result->IsLoaded());
       }
       // Make sure to disallow caching for uninitialized constants
       // found in the dictionary-mode objects.
@@ -1913,9 +1857,6 @@
   if (result->IsNotFound()) {
     return AddProperty(name, value, attributes);
   }
-  if (!result->IsLoaded()) {
-    return SetLazyProperty(result, name, value, attributes);
-  }
   if (result->IsReadOnly() && result->IsProperty()) return value;
   // This is a real property that is not read-only, or it is a
   // transition or null descriptor and there are no setters in the prototypes.
@@ -1997,9 +1938,6 @@
   if (result->IsNotFound()) {
     return AddProperty(name, value, attributes);
   }
-  if (!result->IsLoaded()) {
-    return SetLazyProperty(result, name, value, attributes);
-  }
   // Check of IsReadOnly removed from here in clone.
   switch (result->type()) {
     case NORMAL:
@@ -2520,11 +2458,6 @@
       }
       return DeletePropertyWithInterceptor(name);
     }
-    if (!result.IsLoaded()) {
-      return JSObject::cast(this)->DeleteLazyProperty(&result,
-                                                      name,
-                                                      mode);
-    }
     // Normalize object if needed.
     Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
     if (obj->IsFailure()) return obj;
diff --git a/src/objects.h b/src/objects.h
index f641196..4893666 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -34,6 +34,8 @@
 #include "unicode-inl.h"
 #if V8_TARGET_ARCH_ARM
 #include "arm/constants-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/constants-mips.h"
 #endif
 
 //
@@ -1101,7 +1103,6 @@
 # define BIG_ENDIAN_FLOATING_POINT 1
 #endif
   static const int kSize = kValueOffset + kDoubleSize;
-
   static const uint32_t kSignMask = 0x80000000u;
   static const uint32_t kExponentMask = 0x7ff00000u;
   static const uint32_t kMantissaMask = 0xfffffu;
@@ -1213,12 +1214,6 @@
   // Deletes the named property in a normalized object.
   Object* DeleteNormalizedProperty(String* name, DeleteMode mode);
 
-  // Sets a property that currently has lazy loading.
-  Object* SetLazyProperty(LookupResult* result,
-                          String* name,
-                          Object* value,
-                          PropertyAttributes attributes);
-
   // Returns the class name ([[Class]] property in the specification).
   String* class_name();
 
@@ -1253,13 +1248,6 @@
   Object* GetLocalPropertyPostInterceptor(JSObject* receiver,
                                           String* name,
                                           PropertyAttributes* attributes);
-  Object* GetLazyProperty(Object* receiver,
-                          LookupResult* result,
-                          String* name,
-                          PropertyAttributes* attributes);
-
-  // Tells whether this object needs to be loaded.
-  inline bool IsLoaded();
 
   // Returns true if this is an instance of an api function and has
   // been modified since it was created.  May give false positives.
@@ -1297,9 +1285,6 @@
 
   Object* DeleteProperty(String* name, DeleteMode mode);
   Object* DeleteElement(uint32_t index, DeleteMode mode);
-  Object* DeleteLazyProperty(LookupResult* result,
-                             String* name,
-                             DeleteMode mode);
 
   // Tests for the fast common case for property enumeration.
   bool IsSimpleEnum();
@@ -2871,20 +2856,6 @@
     return ((1 << kIsUndetectable) & bit_field()) != 0;
   }
 
-  inline void set_needs_loading(bool value) {
-    if (value) {
-      set_bit_field2(bit_field2() | (1 << kNeedsLoading));
-    } else {
-      set_bit_field2(bit_field2() & ~(1 << kNeedsLoading));
-    }
-  }
-
-  // Does this object or function require a lazily loaded script to be
-  // run before being used?
-  inline bool needs_loading() {
-    return ((1 << kNeedsLoading) & bit_field2()) != 0;
-  }
-
   // Tells whether the instance has a call-as-function handler.
   inline void set_has_instance_call_handler() {
     set_bit_field(bit_field() | (1 << kHasInstanceCallHandler));
@@ -3018,8 +2989,7 @@
   static const int kIsAccessCheckNeeded = 7;
 
   // Bit positions for bit field 2
-  static const int kNeedsLoading = 0;
-  static const int kIsExtensible = 1;
+  static const int kIsExtensible = 0;
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 247f43f..ef4ae17 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -89,6 +89,8 @@
   // Here gcc is telling us that we are on an ARM and gcc is assuming that we
   // have VFP3 instructions.  If gcc can assume it then so can we.
   return 1u << VFP3;
+#elif CAN_USE_ARMV7_INSTRUCTIONS
+  return 1u << ARMv7;
 #else
   return 0;  // Linux runs on anything.
 #endif
@@ -113,6 +115,9 @@
     case VFP3:
       search_string = "vfp";
       break;
+    case ARMv7:
+      search_string = "ARMv7";
+      break;
     default:
       UNREACHABLE();
   }
@@ -151,11 +156,12 @@
   // On EABI ARM targets this is required for fp correctness in the
   // runtime system.
   return 8;
-#else
+#elif V8_TARGET_ARCH_MIPS
+  return 8;
+#endif
   // With gcc 4.4 the tree vectorization optimiser can generate code
   // that requires 16 byte alignment such as movdqa on x86.
   return 16;
-#endif
 }
 
 
@@ -169,29 +175,11 @@
 
 
 double OS::LocalTimeOffset() {
-#if defined(ANDROID)
-  // Android does not have tm_gmtoff, so instead we'll work it out.
-  // Use a date in the local timezone representing 1st January 2010.
-  struct tm t;
-  t.tm_sec = 0;
-  t.tm_min = 0;
-  t.tm_hour = 0;
-  t.tm_mday = 1;
-  t.tm_mon = 0;
-  t.tm_year = 110;
-  t.tm_wday = 0;
-  t.tm_yday = 0;
-  t.tm_isdst = 0;
-  // 1262304000 is January, 1 2010 UTC.
-  time_t offset = 1262304000 - mktime(&t);
-  return static_cast<double>(offset * msPerSecond);
-#else
   time_t tv = time(NULL);
   struct tm* t = localtime(&tv);
   // tm_gmtoff includes any daylight savings offset, so subtract it.
   return static_cast<double>(t->tm_gmtoff * msPerSecond -
                              (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-#endif
 }
 
 
@@ -280,6 +268,8 @@
 //  which is the architecture of generated code).
 #if defined(__arm__) || defined(__thumb__)
   asm("bkpt 0");
+#elif defined(__mips__)
+  asm("break");
 #else
   asm("int $3");
 #endif
@@ -731,8 +721,11 @@
 
 
 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
+    return;
+/*#ifndef V8_HOST_ARCH_MIPS
   USE(info);
   if (signal != SIGPROF) return;
+  if (!IsVmThread()) return;
   if (active_sampler_ == NULL) return;
 
   TickSample sample;
@@ -761,6 +754,9 @@
     sample.sp = reinterpret_cast<Address>(mcontext.arm_sp);
     sample.fp = reinterpret_cast<Address>(mcontext.arm_fp);
 #endif
+#elif V8_HOST_ARCH_MIPS
+    // Implement this on MIPS.
+    UNIMPLEMENTED();
 #endif
     if (IsVmThread())
       active_sampler_->SampleStack(&sample);
@@ -770,6 +766,7 @@
   sample.state = Logger::state();
 
   active_sampler_->Tick(&sample);
+#endif*/
 }
 
 
@@ -808,7 +805,7 @@
   sa.sa_sigaction = ProfilerSignalHandler;
   sigemptyset(&sa.sa_mask);
   sa.sa_flags = SA_SIGINFO;
-  if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
+  if (sigaction(SIGALRM, &sa, &data_->old_signal_handler_) != 0) return;
   data_->signal_handler_installed_ = true;
 
   // Set the itimer to generate a tick for each interval.
@@ -817,7 +814,7 @@
   itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
   itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
   itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
-  setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);
+  setitimer(ITIMER_REAL, &itimer, &data_->old_timer_value_);
 
   // Set this sampler as the active sampler.
   active_sampler_ = this;
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 81b0d4c..7ea3ce9 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -511,7 +511,7 @@
 // takes into account whether daylight saving is in effect at the time.
 // Only times in the 32-bit Unix range may be passed to this function.
 // Also, adding the time-zone offset to the input must not overflow.
-// The function EquivalentTime() in date-delay.js guarantees this.
+// The function EquivalentTime() in date.js guarantees this.
 int64_t Time::LocalOffset() {
   // Initialize timezone information, if needed.
   TzSet();
diff --git a/src/property.h b/src/property.h
index 1869719..b993af1 100644
--- a/src/property.h
+++ b/src/property.h
@@ -242,15 +242,6 @@
   bool IsCacheable() { return cacheable_; }
   void DisallowCaching() { cacheable_ = false; }
 
-  // Tells whether the value needs to be loaded.
-  bool IsLoaded() {
-    if (lookup_type_ == DESCRIPTOR_TYPE || lookup_type_ == DICTIONARY_TYPE) {
-      Object* target = GetLazyValue();
-      return !target->IsJSObject() || JSObject::cast(target)->IsLoaded();
-    }
-    return true;
-  }
-
   Object* GetLazyValue() {
     switch (type()) {
       case FIELD:
diff --git a/src/regexp.js b/src/regexp.js
new file mode 100644
index 0000000..7bec455
--- /dev/null
+++ b/src/regexp.js
@@ -0,0 +1,406 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Expect $Object = global.Object;
+// Expect $Array = global.Array;
+
+const $RegExp = global.RegExp;
+
+// A recursive descent parser for Patterns according to the grammar of
+// ECMA-262 15.10.1, with deviations noted below.
+function DoConstructRegExp(object, pattern, flags, isConstructorCall) {
+  // RegExp : Called as constructor; see ECMA-262, section 15.10.4.
+  if (IS_REGEXP(pattern)) {
+    if (!IS_UNDEFINED(flags)) {
+      throw MakeTypeError('regexp_flags', []);
+    }
+    flags = (pattern.global ? 'g' : '')
+        + (pattern.ignoreCase ? 'i' : '')
+        + (pattern.multiline ? 'm' : '');
+    pattern = pattern.source;
+  }
+
+  pattern = IS_UNDEFINED(pattern) ? '' : ToString(pattern);
+  flags = IS_UNDEFINED(flags) ? '' : ToString(flags);
+
+  var global = false;
+  var ignoreCase = false;
+  var multiline = false;
+
+  for (var i = 0; i < flags.length; i++) {
+    var c = StringCharAt.call(flags, i);
+    switch (c) {
+      case 'g':
+        // Allow duplicate flags to be consistent with JSC and others.
+        global = true;
+        break;
+      case 'i':
+        ignoreCase = true;
+        break;
+      case 'm':
+        multiline = true;
+        break;
+      default:
+        // Ignore flags that have no meaning to be consistent with
+        // JSC.
+        break;
+    }
+  }
+
+  if (isConstructorCall) {
+    // ECMA-262, section 15.10.7.1.
+    %SetProperty(object, 'source', pattern,
+                 DONT_DELETE |  READ_ONLY | DONT_ENUM);
+
+    // ECMA-262, section 15.10.7.2.
+    %SetProperty(object, 'global', global, DONT_DELETE | READ_ONLY | DONT_ENUM);
+
+    // ECMA-262, section 15.10.7.3.
+    %SetProperty(object, 'ignoreCase', ignoreCase,
+                 DONT_DELETE | READ_ONLY | DONT_ENUM);
+
+    // ECMA-262, section 15.10.7.4.
+    %SetProperty(object, 'multiline', multiline,
+                 DONT_DELETE | READ_ONLY | DONT_ENUM);
+
+    // ECMA-262, section 15.10.7.5.
+    %SetProperty(object, 'lastIndex', 0, DONT_DELETE | DONT_ENUM);
+  } else { // RegExp is being recompiled via RegExp.prototype.compile.
+    %IgnoreAttributesAndSetProperty(object, 'source', pattern);
+    %IgnoreAttributesAndSetProperty(object, 'global', global);
+    %IgnoreAttributesAndSetProperty(object, 'ignoreCase', ignoreCase);
+    %IgnoreAttributesAndSetProperty(object, 'multiline', multiline);
+    %IgnoreAttributesAndSetProperty(object, 'lastIndex', 0);
+  }
+
+  // Call internal function to compile the pattern.
+  %RegExpCompile(object, pattern, flags);
+}
+
+
+function RegExpConstructor(pattern, flags) {
+  if (%_IsConstructCall()) {
+    DoConstructRegExp(this, pattern, flags, true);
+  } else {
+    // RegExp : Called as function; see ECMA-262, section 15.10.3.1.
+    if (IS_REGEXP(pattern) && IS_UNDEFINED(flags)) {
+      return pattern;
+    }
+    return new $RegExp(pattern, flags);
+  }
+}
+
+
+// Deprecated RegExp.prototype.compile method.  We behave as if the constructor
+// had been called again.  In SpiderMonkey, this method returns the regexp
+// object.  In JSC, it returns undefined.  For compatibility with JSC, we match
+// their behavior.
+function CompileRegExp(pattern, flags) {
+  // Both JSC and SpiderMonkey treat a missing pattern argument as the
+  // empty subject string, and an actual undefined value passed as the
+  // pattern as the string 'undefined'.  Note that JSC is inconsistent
+  // here, treating undefined values differently in
+  // RegExp.prototype.compile and in the constructor, where they are
+  // the empty string.  For compatibility with JSC, we match their
+  // behavior.
+  if (IS_UNDEFINED(pattern) && %_ArgumentsLength() != 0) {
+    DoConstructRegExp(this, 'undefined', flags, false);
+  } else {
+    DoConstructRegExp(this, pattern, flags, false);
+  }
+}
+
+
+function DoRegExpExec(regexp, string, index) {
+  return %_RegExpExec(regexp, string, index, lastMatchInfo);
+}
+
+
+function RegExpExec(string) {
+  if (!IS_REGEXP(this)) {
+    throw MakeTypeError('method_called_on_incompatible',
+                        ['RegExp.prototype.exec', this]);
+  }
+  if (%_ArgumentsLength() == 0) {
+    var regExpInput = LAST_INPUT(lastMatchInfo);
+    if (IS_UNDEFINED(regExpInput)) {
+      throw MakeError('no_input_to_regexp', [this]);
+    }
+    string = regExpInput;
+  }
+  var s = ToString(string);
+  var length = s.length;
+  var lastIndex = this.lastIndex;
+  var i = this.global ? TO_INTEGER(lastIndex) : 0;
+
+  if (i < 0 || i > s.length) {
+    this.lastIndex = 0;
+    return null;
+  }
+
+  %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, s, lastIndex]);
+  // matchIndices is either null or the lastMatchInfo array.
+  var matchIndices = %_RegExpExec(this, s, i, lastMatchInfo);
+
+  if (matchIndices == null) {
+    if (this.global) this.lastIndex = 0;
+    return matchIndices; // no match
+  }
+
+  var numResults = NUMBER_OF_CAPTURES(lastMatchInfo) >> 1;
+  var result = new $Array(numResults);
+  for (var i = 0; i < numResults; i++) {
+    var matchStart = lastMatchInfo[CAPTURE(i << 1)];
+    var matchEnd = lastMatchInfo[CAPTURE((i << 1) + 1)];
+    if (matchStart != -1 && matchEnd != -1) {
+      result[i] = SubString(s, matchStart, matchEnd);
+    } else {
+      // Make sure the element is present. Avoid reading the undefined
+      // property from the global object since this may change.
+      result[i] = void 0;
+    }
+  }
+
+  if (this.global)
+    this.lastIndex = lastMatchInfo[CAPTURE1];
+  result.index = lastMatchInfo[CAPTURE0];
+  result.input = s;
+  return result;
+}
+
+
+// Section 15.10.6.3 doesn't actually make sense, but the intention seems to be
+// that test is defined in terms of RegExp.prototype.exec. However, it probably
+// means the original value of RegExp.prototype.exec, which is what everybody
+// else implements.
+function RegExpTest(string) {
+  if (!IS_REGEXP(this)) {
+    throw MakeTypeError('method_called_on_incompatible',
+                        ['RegExp.prototype.test', this]);
+  }
+  if (%_ArgumentsLength() == 0) {
+    var regExpInput = LAST_INPUT(lastMatchInfo);
+    if (IS_UNDEFINED(regExpInput)) {
+      throw MakeError('no_input_to_regexp', [this]);
+    }
+    string = regExpInput;
+  }
+  var s = ToString(string);
+  var length = s.length;
+  var lastIndex = this.lastIndex;
+  var i = this.global ? TO_INTEGER(lastIndex) : 0;
+
+  if (i < 0 || i > s.length) {
+    this.lastIndex = 0;
+    return false;
+  }
+
+  %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, s, lastIndex]);
+  // matchIndices is either null or the lastMatchInfo array.
+  var matchIndices = %_RegExpExec(this, s, i, lastMatchInfo);
+
+  if (matchIndices == null) {
+    if (this.global) this.lastIndex = 0;
+    return false;
+  }
+
+  if (this.global) this.lastIndex = lastMatchInfo[CAPTURE1];
+  return true;
+}
+
+
+function RegExpToString() {
+  // If this.source is an empty string, output /(?:)/.
+  // http://bugzilla.mozilla.org/show_bug.cgi?id=225550
+  // ecma_2/RegExp/properties-001.js.
+  var src = this.source ? this.source : '(?:)';
+  var result = '/' + src + '/';
+  if (this.global)
+    result += 'g';
+  if (this.ignoreCase)
+    result += 'i';
+  if (this.multiline)
+    result += 'm';
+  return result;
+}
+
+
+// Getters for the static properties lastMatch, lastParen, leftContext, and
+// rightContext of the RegExp constructor.  The properties are computed based
+// on the captures array of the last successful match and the subject string
+// of the last successful match.
+function RegExpGetLastMatch() {
+  var regExpSubject = LAST_SUBJECT(lastMatchInfo);
+  return SubString(regExpSubject,
+                   lastMatchInfo[CAPTURE0],
+                   lastMatchInfo[CAPTURE1]);
+}
+
+
+function RegExpGetLastParen() {
+  var length = NUMBER_OF_CAPTURES(lastMatchInfo);
+  if (length <= 2) return '';  // There were no captures.
+  // We match the SpiderMonkey behavior: return the substring defined by the
+  // last pair (after the first pair) of elements of the capture array even if
+  // it is empty.
+  var regExpSubject = LAST_SUBJECT(lastMatchInfo);
+  var start = lastMatchInfo[CAPTURE(length - 2)];
+  var end = lastMatchInfo[CAPTURE(length - 1)];
+  if (start != -1 && end != -1) {
+    return SubString(regExpSubject, start, end);
+  }
+  return "";
+}
+
+
+function RegExpGetLeftContext() {
+  return SubString(LAST_SUBJECT(lastMatchInfo),
+                   0,
+                   lastMatchInfo[CAPTURE0]);
+}
+
+
+function RegExpGetRightContext() {
+  var subject = LAST_SUBJECT(lastMatchInfo);
+  return SubString(subject,
+                   lastMatchInfo[CAPTURE1],
+                   subject.length);
+}
+
+
+// The properties $1..$9 are the first nine capturing substrings of the last
+// successful match, or ''.  The function RegExpMakeCaptureGetter will be
+// called with indices from 1 to 9.
+function RegExpMakeCaptureGetter(n) {
+  return function() {
+    var index = n * 2;
+    if (index >= NUMBER_OF_CAPTURES(lastMatchInfo)) return '';
+    var matchStart = lastMatchInfo[CAPTURE(index)];
+    var matchEnd = lastMatchInfo[CAPTURE(index + 1)];
+    if (matchStart == -1 || matchEnd == -1) return '';
+    return SubString(LAST_SUBJECT(lastMatchInfo), matchStart, matchEnd);
+  };
+}
+
+
+// Property of the builtins object for recording the result of the last
+// regexp match.  The property lastMatchInfo includes the matchIndices
+// array of the last successful regexp match (an array of start/end index
+// pairs for the match and all the captured substrings), the invariant is
+// that there are at least two capture indices.  The array also contains
+// the subject string for the last successful match.
+var lastMatchInfo = [
+    2,                 // REGEXP_NUMBER_OF_CAPTURES
+    "",                // Last subject.
+    void 0,            // Last input - settable with RegExpSetInput.
+    0,                 // REGEXP_FIRST_CAPTURE + 0
+    0,                 // REGEXP_FIRST_CAPTURE + 1
+];
+
+// -------------------------------------------------------------------
+
+function SetupRegExp() {
+  %FunctionSetInstanceClassName($RegExp, 'RegExp');
+  %FunctionSetPrototype($RegExp, new $Object());
+  %SetProperty($RegExp.prototype, 'constructor', $RegExp, DONT_ENUM);
+  %SetCode($RegExp, RegExpConstructor);
+
+  InstallFunctions($RegExp.prototype, DONT_ENUM, $Array(
+    "exec", RegExpExec,
+    "test", RegExpTest,
+    "toString", RegExpToString,
+    "compile", CompileRegExp
+  ));
+
+  // The length of compile is 1 in SpiderMonkey.
+  %FunctionSetLength($RegExp.prototype.compile, 1);
+
+  // The properties input, $input, and $_ are aliases for each other.  When this
+  // value is set, the value it is set to is coerced to a string.
+  // Getter and setter for the input.
+  function RegExpGetInput() {
+    var regExpInput = LAST_INPUT(lastMatchInfo);
+    return IS_UNDEFINED(regExpInput) ? "" : regExpInput;
+  }
+  function RegExpSetInput(string) {
+    LAST_INPUT(lastMatchInfo) = ToString(string);
+  };
+
+  %DefineAccessor($RegExp, 'input', GETTER, RegExpGetInput, DONT_DELETE);
+  %DefineAccessor($RegExp, 'input', SETTER, RegExpSetInput, DONT_DELETE);
+  %DefineAccessor($RegExp, '$_', GETTER, RegExpGetInput, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$_', SETTER, RegExpSetInput, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$input', GETTER, RegExpGetInput, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$input', SETTER, RegExpSetInput, DONT_ENUM | DONT_DELETE);
+
+  // The properties multiline and $* are aliases for each other.  When this
+  // value is set in SpiderMonkey, the value it is set to is coerced to a
+  // boolean.  We mimic that behavior with a slight difference: in SpiderMonkey
+  // the value of the expression 'RegExp.multiline = null' (for instance) is the
+  // boolean false (ie, the value after coercion), while in V8 it is the value
+  // null (ie, the value before coercion).
+
+  // Getter and setter for multiline.
+  var multiline = false;
+  function RegExpGetMultiline() { return multiline; };
+  function RegExpSetMultiline(flag) { multiline = flag ? true : false; };
+
+  %DefineAccessor($RegExp, 'multiline', GETTER, RegExpGetMultiline, DONT_DELETE);
+  %DefineAccessor($RegExp, 'multiline', SETTER, RegExpSetMultiline, DONT_DELETE);
+  %DefineAccessor($RegExp, '$*', GETTER, RegExpGetMultiline, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$*', SETTER, RegExpSetMultiline, DONT_ENUM | DONT_DELETE);
+
+
+  function NoOpSetter(ignored) {}
+
+
+  // Static properties set by a successful match.
+  %DefineAccessor($RegExp, 'lastMatch', GETTER, RegExpGetLastMatch, DONT_DELETE);
+  %DefineAccessor($RegExp, 'lastMatch', SETTER, NoOpSetter, DONT_DELETE);
+  %DefineAccessor($RegExp, '$&', GETTER, RegExpGetLastMatch, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$&', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, 'lastParen', GETTER, RegExpGetLastParen, DONT_DELETE);
+  %DefineAccessor($RegExp, 'lastParen', SETTER, NoOpSetter, DONT_DELETE);
+  %DefineAccessor($RegExp, '$+', GETTER, RegExpGetLastParen, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$+', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, 'leftContext', GETTER, RegExpGetLeftContext, DONT_DELETE);
+  %DefineAccessor($RegExp, 'leftContext', SETTER, NoOpSetter, DONT_DELETE);
+  %DefineAccessor($RegExp, '$`', GETTER, RegExpGetLeftContext, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$`', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, 'rightContext', GETTER, RegExpGetRightContext, DONT_DELETE);
+  %DefineAccessor($RegExp, 'rightContext', SETTER, NoOpSetter, DONT_DELETE);
+  %DefineAccessor($RegExp, "$'", GETTER, RegExpGetRightContext, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, "$'", SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
+
+  for (var i = 1; i < 10; ++i) {
+    %DefineAccessor($RegExp, '$' + i, GETTER, RegExpMakeCaptureGetter(i), DONT_DELETE);
+    %DefineAccessor($RegExp, '$' + i, SETTER, NoOpSetter, DONT_DELETE);
+  }
+}
+
+
+SetupRegExp();
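
For readers unfamiliar with the non-standard RegExp statics that SetupRegExp wires up, a quick usage sketch (plain JavaScript, behaviour as implemented by the getters above):

    // After any successful match, the static accessors reflect lastMatchInfo.
    var re = /(\d+)-(\d+)/;
    re.exec('order 12-34 shipped');
    // RegExp.lastMatch     (and RegExp['$&'])  -> '12-34'
    // RegExp.$1                                -> '12'
    // RegExp.$2                                -> '34'
    // RegExp.lastParen     (and RegExp['$+'])  -> '34'
    // RegExp.leftContext   (and RegExp['$`'])  -> 'order '
    // RegExp.rightContext  (and RegExp["$'"])  -> ' shipped'
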
diff --git a/src/register-allocator-inl.h b/src/register-allocator-inl.h
index 8fb498b..b3fa474 100644
--- a/src/register-allocator-inl.h
+++ b/src/register-allocator-inl.h
@@ -38,6 +38,8 @@
 #include "x64/register-allocator-x64-inl.h"
 #elif V8_TARGET_ARCH_ARM
 #include "arm/register-allocator-arm-inl.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/register-allocator-mips-inl.h"
 #else
 #error Unsupported target architecture.
 #endif
diff --git a/src/register-allocator.h b/src/register-allocator.h
index 1765633..0f46996 100644
--- a/src/register-allocator.h
+++ b/src/register-allocator.h
@@ -36,6 +36,8 @@
 #include "x64/register-allocator-x64.h"
 #elif V8_TARGET_ARCH_ARM
 #include "arm/register-allocator-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/register-allocator-mips.h"
 #else
 #error Unsupported target architecture.
 #endif
diff --git a/src/runtime.cc b/src/runtime.cc
index 515343b..aa94ce5 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -596,8 +596,9 @@
 
   if (result.type() == CALLBACKS) {
     Object* structure = result.GetCallbackObject();
-    if (structure->IsProxy()) {
-      // Property that is internally implemented as a callback.
+    if (structure->IsProxy() || structure->IsAccessorInfo()) {
+      // Property that is internally implemented as a callback or
+      // an API defined callback.
       Object* value = obj->GetPropertyWithCallback(
           obj, structure, name, result.holder());
       elms->set(0, Heap::false_value());
@@ -609,7 +610,6 @@
       elms->set(1, FixedArray::cast(structure)->get(0));
       elms->set(2, FixedArray::cast(structure)->get(1));
     } else {
-      // TODO(ricow): Handle API callbacks.
       return Heap::undefined_value();
     }
   } else {
@@ -619,7 +619,7 @@
   }
 
   elms->set(3, Heap::ToBoolean(!result.IsDontEnum()));
-  elms->set(4, Heap::ToBoolean(!result.IsReadOnly()));
+  elms->set(4, Heap::ToBoolean(!result.IsDontDelete()));
   return *desc;
 }
 
@@ -2855,7 +2855,7 @@
       // Lookup cache miss.  Perform lookup and update the cache if appropriate.
       LookupResult result;
       receiver->LocalLookup(key, &result);
-      if (result.IsProperty() && result.IsLoaded() && result.type() == FIELD) {
+      if (result.IsProperty() && result.type() == FIELD) {
         int offset = result.GetFieldIndex();
         KeyedLookupCache::Update(receiver_map, key, offset);
         return receiver->FastPropertyAt(offset);
@@ -2888,6 +2888,65 @@
 }
 
 
+static Object* Runtime_DefineOrRedefineAccessorProperty(Arguments args) {
+  ASSERT(args.length() == 5);
+  HandleScope scope;
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_CHECKED(String, name, args[1]);
+  CONVERT_CHECKED(Smi, flag_setter, args[2]);
+  CONVERT_CHECKED(JSFunction, fun, args[3]);
+  CONVERT_CHECKED(Smi, flag_attr, args[4]);
+  int unchecked = flag_attr->value();
+  RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
+  RUNTIME_ASSERT(!obj->IsNull());
+  LookupResult result;
+  obj->LocalLookupRealNamedProperty(name, &result);
+
+  PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
+  // If an existing property is either FIELD, NORMAL or CONSTANT_FUNCTION,
+  // delete it to avoid running into trouble in DefineAccessor, which
+  // handles this incorrectly if the property is readonly (it does nothing).
+  if (result.IsValid() &&
+      (result.type() == FIELD || result.type() == NORMAL
+       || result.type() == CONSTANT_FUNCTION)) {
+    obj->DeleteProperty(name, JSObject::NORMAL_DELETION);
+  }
+  return obj->DefineAccessor(name, flag_setter->value() == 0, fun, attr);
+}
+
+static Object* Runtime_DefineOrRedefineDataProperty(Arguments args) {
+  ASSERT(args.length() == 4);
+  HandleScope scope;
+  CONVERT_ARG_CHECKED(JSObject, js_object, 0);
+  CONVERT_ARG_CHECKED(String, name, 1);
+  Handle<Object> obj_value = args.at<Object>(2);
+
+  CONVERT_CHECKED(Smi, flag, args[3]);
+  int unchecked = flag->value();
+  RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
+
+  LookupResult result;
+  js_object->LocalLookupRealNamedProperty(*name, &result);
+
+  PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
+
+  // Take special care when attributes are different and there is already
+  // a property. For simplicity we normalize the property which enables us
+  // to not worry about changing the instance_descriptor and creating a new
+  // map. The current version of SetObjectProperty does not handle attributes
+  // correctly in the case where a property is a field and is reset with
+  // new attributes.
+  if (result.IsProperty() && attr != result.GetAttributes()) {
+    PropertyDetails details = PropertyDetails(attr, NORMAL);
+    // New attributes - normalize to avoid writing to instance descriptor
+    js_object->NormalizeProperties(KEEP_INOBJECT_PROPERTIES, 0);
+    return js_object->SetNormalizedProperty(*name, *obj_value, details);
+  }
+
+  return Runtime::SetObjectProperty(js_object, name, obj_value, attr);
+}
+
+
 Object* Runtime::SetObjectProperty(Handle<Object> object,
                                    Handle<Object> key,
                                    Handle<Object> value,
@@ -8100,6 +8159,15 @@
 }
 
 
+static Object* Runtime_ProfileLogMarker(Arguments args) {
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(String, format, args[0]);
+  Vector<const char> marker = format->ToAsciiVector();
+  Logger::LogProfileMarker(marker);
+  return Heap::undefined_value();
+}
+
+
 #ifdef DEBUG
 // ListNatives is ONLY used by the fuzz-natives.js in debug mode
 // Exclude the code in release mode.
diff --git a/src/runtime.h b/src/runtime.h
index b2b8609..12f969d 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -215,6 +215,8 @@
   F(ResolvePossiblyDirectEval, 3, 2) \
   \
   F(SetProperty, -1 /* 3 or 4 */, 1) \
+  F(DefineOrRedefineDataProperty, 4, 1) \
+  F(DefineOrRedefineAccessorProperty, 5, 1) \
   F(IgnoreAttributesAndSetProperty, -1 /* 3 or 4 */, 1) \
   \
   /* Arrays */ \
@@ -282,7 +284,11 @@
   F(DeleteHandleScopeExtensions, 0, 1) \
   \
   /* Pseudo functions - handled as macros by parser */ \
-  F(IS_VAR, 1, 1)
+  F(IS_VAR, 1, 1) \
+  \
+  /* Profile marker support for more fine grained profiling of page cyclers. */ \
+  /* Also known as a stupid hack. :) */                                 \
+  F(ProfileLogMarker, 1, 1)
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 #define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) \
diff --git a/src/runtime.js b/src/runtime.js
index c4c855e..10ef98e 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -506,6 +506,16 @@
 }
 
 
+// ECMA-262, section 9.2, page 30
+function ToBoolean(x) {
+  if (IS_BOOLEAN(x)) return x;
+  if (IS_STRING(x)) return x.length != 0;
+  if (x == null) return false;
+  if (IS_NUMBER(x)) return !((x == 0) || NUMBER_IS_NAN(x));
+  return true;
+}
+
+
 // ECMA-262, section 9.3, page 31.
 function ToNumber(x) {
   if (IS_NUMBER(x)) return x;
@@ -526,16 +536,6 @@
 }
 
 
-// ... where did this come from?
-function ToBoolean(x) {
-  if (IS_BOOLEAN(x)) return x;
-  if (IS_STRING(x)) return x.length != 0;
-  if (x == null) return false;
-  if (IS_NUMBER(x)) return !((x == 0) || NUMBER_IS_NAN(x));
-  return true;
-}
-
-
 // ECMA-262, section 9.9, page 36.
 function ToObject(x) {
   if (IS_STRING(x)) return new $String(x);
@@ -569,6 +569,25 @@
 }
 
 
+// ES5, section 9.12
+function SameValue(x, y) {
+  if (typeof x != typeof y) return false;
+  if (IS_NULL_OR_UNDEFINED(x)) return true;
+  if (IS_NUMBER(x)) {
+    if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) return true;
+    // x is +0 and y is -0 or vice versa
+    if (x === 0 && y === 0 && !%_IsSmi(x) && !%_IsSmi(y) &&
+        ((1 / x < 0 && 1 / y > 0) || (1 / x > 0 && 1 / y < 0))) {
+      return false;
+    }
+    return x == y;
+  }
+  if (IS_STRING(x)) return %StringEquals(x, y);
+  if (IS_BOOLEAN(x)) return %NumberEquals(%ToNumber(x), %ToNumber(y));
+
+  return %_ObjectEquals(x, y);
+}
+
 
 /* ---------------------------------
    - - -   U t i l i t i e s   - - -
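
The SameValue addition above implements ES5 section 9.12, which differs from == and === only for NaN and signed zero. A self-contained rendering of the same spec algorithm, using only ordinary JavaScript for illustration:

    // ES5 SameValue, expressed without V8's %-natives.
    function sameValue(x, y) {
      if (typeof x !== typeof y) return false;
      if (typeof x === 'number') {
        if (x !== x && y !== y) return true;              // NaN equals NaN here
        if (x === 0 && y === 0) return 1 / x === 1 / y;   // +0 and -0 differ
        return x === y;
      }
      return x === y;  // strings, booleans, undefined, null, objects
    }
    // sameValue(NaN, NaN) -> true    (NaN === NaN is false)
    // sameValue(0, -0)    -> false   (0 === -0 is true)
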
diff --git a/src/serialize.cc b/src/serialize.cc
index bc934fb..ad8e20f 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -539,7 +539,7 @@
     HeapObject* new_object = HeapObject::cast(new_allocation);
     // Record all large objects in the same space.
     address = new_object->address();
-    high_water_[LO_SPACE] = address + size;
+    pages_[LO_SPACE].Add(address);
   }
   last_object_address_ = address;
   return address;
@@ -892,7 +892,7 @@
 Serializer::Serializer(SnapshotByteSink* sink)
     : sink_(sink),
       current_root_index_(0),
-      external_reference_encoder_(NULL),
+      external_reference_encoder_(new ExternalReferenceEncoder),
       large_object_total_(0) {
   for (int i = 0; i <= LAST_SPACE; i++) {
     fullness_[i] = 0;
@@ -900,28 +900,28 @@
 }
 
 
+Serializer::~Serializer() {
+  delete external_reference_encoder_;
+}
+
+
 void StartupSerializer::SerializeStrongReferences() {
   // No active threads.
   CHECK_EQ(NULL, ThreadState::FirstInUse());
   // No active or weak handles.
   CHECK(HandleScopeImplementer::instance()->blocks()->is_empty());
   CHECK_EQ(0, GlobalHandles::NumberOfWeakHandles());
-  CHECK_EQ(NULL, external_reference_encoder_);
   // We don't support serializing installed extensions.
   for (RegisteredExtension* ext = RegisteredExtension::first_extension();
        ext != NULL;
        ext = ext->next()) {
     CHECK_NE(v8::INSTALLED, ext->state());
   }
-  external_reference_encoder_ = new ExternalReferenceEncoder();
   Heap::IterateStrongRoots(this, VISIT_ONLY_STRONG);
-  delete external_reference_encoder_;
-  external_reference_encoder_ = NULL;
 }
 
 
 void PartialSerializer::Serialize(Object** object) {
-  external_reference_encoder_ = new ExternalReferenceEncoder();
   this->VisitPointer(object);
 
   // After we have done the partial serialization the partial snapshot cache
@@ -935,9 +935,6 @@
     startup_serializer_->VisitPointer(&partial_snapshot_cache_[index]);
   }
   partial_snapshot_cache_length_ = kPartialSnapshotCacheCapacity;
-
-  delete external_reference_encoder_;
-  external_reference_encoder_ = NULL;
 }
 
 
@@ -989,6 +986,7 @@
     Object* entry = partial_snapshot_cache_[i];
     if (entry == heap_object) return i;
   }
+
   // We didn't find the object in the cache.  So we add it to the cache and
   // then visit the pointer so that it becomes part of the startup snapshot
   // and we can refer to it from the partial snapshot.
diff --git a/src/serialize.h b/src/serialize.h
index ce3b006..ab2ae9f 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -120,28 +120,9 @@
     return data_[position_++];
   }
 
-  void CopyRaw(byte* to, int number_of_bytes) {
-    memcpy(to, data_ + position_, number_of_bytes);
-    position_ += number_of_bytes;
-  }
+  inline void CopyRaw(byte* to, int number_of_bytes);
 
-  int GetInt() {
-    // A little unwind to catch the really small ints.
-    int snapshot_byte = Get();
-    if ((snapshot_byte & 0x80) == 0) {
-      return snapshot_byte;
-    }
-    int accumulator = (snapshot_byte & 0x7f) << 7;
-    while (true) {
-      snapshot_byte = Get();
-      if ((snapshot_byte & 0x80) == 0) {
-        return accumulator | snapshot_byte;
-      }
-      accumulator = (accumulator | (snapshot_byte & 0x7f)) << 7;
-    }
-    UNREACHABLE();
-    return accumulator;
-  }
+  inline int GetInt();
 
   bool AtEOF() {
     return position_ == length_;
@@ -235,11 +216,35 @@
   }
 
   static int partial_snapshot_cache_length_;
-  static const int kPartialSnapshotCacheCapacity = 1024;
+  static const int kPartialSnapshotCacheCapacity = 1300;
   static Object* partial_snapshot_cache_[];
 };
 
 
+int SnapshotByteSource::GetInt() {
+  // A little unwind to catch the really small ints.
+  int snapshot_byte = Get();
+  if ((snapshot_byte & 0x80) == 0) {
+    return snapshot_byte;
+  }
+  int accumulator = (snapshot_byte & 0x7f) << 7;
+  while (true) {
+    snapshot_byte = Get();
+    if ((snapshot_byte & 0x80) == 0) {
+      return accumulator | snapshot_byte;
+    }
+    accumulator = (accumulator | (snapshot_byte & 0x7f)) << 7;
+  }
+  UNREACHABLE();
+  return accumulator;
+}
+
+
+void SnapshotByteSource::CopyRaw(byte* to, int number_of_bytes) {
+  memcpy(to, data_ + position_, number_of_bytes);
+  position_ += number_of_bytes;
+}
+
 
 // A Deserializer reads a snapshot and reconstructs the Object graph it defines.
 class Deserializer: public SerializerDeserializer {
@@ -364,6 +369,7 @@
 class Serializer : public SerializerDeserializer {
  public:
   explicit Serializer(SnapshotByteSink* sink);
+  ~Serializer();
   void VisitPointers(Object** start, Object** end);
   // You can call this after serialization to find out how much space was used
   // in each space.
@@ -492,7 +498,12 @@
   virtual int RootIndex(HeapObject* o);
   virtual int PartialSnapshotCacheIndex(HeapObject* o);
   virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
-    return o->IsString() || o->IsSharedFunctionInfo();
+    // Scripts should be referred only through shared function infos.  We can't
+    // allow them to be part of the partial snapshot because they contain a
+    // unique ID, and deserializing several partial snapshots containing script
+    // would cause dupes.
+    ASSERT(!o->IsScript());
+    return o->IsString() || o->IsSharedFunctionInfo() || o->IsHeapNumber();
   }
 
  private:
@@ -530,6 +541,7 @@
   }
 };
 
+
 } }  // namespace v8::internal
 
 #endif  // V8_SERIALIZE_H_
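
Moving GetInt and CopyRaw into the header makes them inlinable at their call sites in the deserializer; the integer format itself is unchanged: 7 payload bits per byte, most significant group first, with the high bit marking "more bytes follow". The partial snapshot cache also grows from 1024 to 1300 entries and now admits heap numbers alongside strings and shared function infos. A decoder with the same logic as GetInt, written out in JavaScript for illustration:

    // Variable-length integer format read by SnapshotByteSource::GetInt().
    function getInt(bytes, pos) {
      var b = bytes[pos++];
      if ((b & 0x80) === 0) return { value: b, next: pos };
      var acc = (b & 0x7f) << 7;
      for (;;) {
        b = bytes[pos++];
        if ((b & 0x80) === 0) return { value: acc | b, next: pos };
        acc = (acc | (b & 0x7f)) << 7;
      }
    }
    // getInt([0x05], 0).value       -> 5
    // getInt([0x81, 0x02], 0).value -> 130   (1 << 7 | 2)
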
diff --git a/src/simulator.h b/src/simulator.h
index 6f8cd5a..485e930 100644
--- a/src/simulator.h
+++ b/src/simulator.h
@@ -34,6 +34,8 @@
 #include "x64/simulator-x64.h"
 #elif V8_TARGET_ARCH_ARM
 #include "arm/simulator-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/simulator-mips.h"
 #else
 #error Unsupported target architecture.
 #endif
diff --git a/src/snapshot-common.cc b/src/snapshot-common.cc
index 1e81b8e..f1106e1 100644
--- a/src/snapshot-common.cc
+++ b/src/snapshot-common.cc
@@ -59,4 +59,24 @@
   return false;
 }
 
+
+Handle<Context> Snapshot::NewContextFromSnapshot() {
+  if (context_size_ == 0) {
+    return Handle<Context>();
+  }
+  Heap::ReserveSpace(new_space_used_,
+                     pointer_space_used_,
+                     data_space_used_,
+                     code_space_used_,
+                     map_space_used_,
+                     cell_space_used_,
+                     large_space_used_);
+  SnapshotByteSource source(context_data_, context_size_);
+  Deserializer deserializer(&source);
+  Object* root;
+  deserializer.DeserializePartial(&root);
+  CHECK(root->IsContext());
+  return Handle<Context>(Context::cast(root));
+}
+
 } }  // namespace v8::internal
diff --git a/src/snapshot-empty.cc b/src/snapshot-empty.cc
index 60ab1e5..cb26eb8 100644
--- a/src/snapshot-empty.cc
+++ b/src/snapshot-empty.cc
@@ -35,6 +35,16 @@
 namespace internal {
 
 const byte Snapshot::data_[] = { 0 };
-int Snapshot::size_ = 0;
+const int Snapshot::size_ = 0;
+const byte Snapshot::context_data_[] = { 0 };
+const int Snapshot::context_size_ = 0;
+
+const int Snapshot::new_space_used_ = 0;
+const int Snapshot::pointer_space_used_ = 0;
+const int Snapshot::data_space_used_ = 0;
+const int Snapshot::code_space_used_ = 0;
+const int Snapshot::map_space_used_ = 0;
+const int Snapshot::cell_space_used_ = 0;
+const int Snapshot::large_space_used_ = 0;
 
 } }  // namespace v8::internal
diff --git a/src/snapshot.h b/src/snapshot.h
index 88ba8db..9f77c20 100644
--- a/src/snapshot.h
+++ b/src/snapshot.h
@@ -38,6 +38,9 @@
   // could be found.
   static bool Initialize(const char* snapshot_file = NULL);
 
+  // Create a new context using the internal partial snapshot.
+  static Handle<Context> NewContextFromSnapshot();
+
   // Returns whether or not the snapshot is enabled.
   static bool IsEnabled() { return size_ != 0; }
 
@@ -47,7 +50,16 @@
 
  private:
   static const byte data_[];
-  static int size_;
+  static const byte context_data_[];
+  static const int new_space_used_;
+  static const int pointer_space_used_;
+  static const int data_space_used_;
+  static const int code_space_used_;
+  static const int map_space_used_;
+  static const int cell_space_used_;
+  static const int large_space_used_;
+  static const int size_;
+  static const int context_size_;
 
   static bool Deserialize(const byte* content, int len);
 
diff --git a/src/string.js b/src/string.js
index ed938ec..b2af050 100644
--- a/src/string.js
+++ b/src/string.js
@@ -160,12 +160,12 @@
 
 // ECMA-262 section 15.5.4.10
 function StringMatch(regexp) {
-  if (!IS_REGEXP(regexp)) regexp = new ORIGINAL_REGEXP(regexp);
+  if (!IS_REGEXP(regexp)) regexp = new $RegExp(regexp);
   var subject = ToString(this);
 
   if (!regexp.global) return regexp.exec(subject);
   %_Log('regexp', 'regexp-match,%0S,%1r', [subject, regexp]);
-  // lastMatchInfo is defined in regexp-delay.js.
+  // lastMatchInfo is defined in regexp.js.
   return %StringMatch(subject, regexp, lastMatchInfo);
 }
 
@@ -457,7 +457,7 @@
 
 // ECMA-262 section 15.5.4.12
 function StringSearch(re) {
-  var regexp = new ORIGINAL_REGEXP(re);
+  var regexp = new $RegExp(re);
   var s = ToString(this);
   var last_idx = regexp.lastIndex; // keep old lastIndex
   regexp.lastIndex = 0;            // ignore re.global property
diff --git a/src/top.cc b/src/top.cc
index 0274838..056d384 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -668,7 +668,7 @@
 
 
 void Top::ComputeLocation(MessageLocation* target) {
-  *target = MessageLocation(empty_script(), -1, -1);
+  *target = MessageLocation(Handle<Script>(Heap::empty_script()), -1, -1);
   StackTraceFrameIterator it;
   if (!it.done()) {
     JavaScriptFrame* frame = it.frame();
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 7397c30..2d24765 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -97,7 +97,12 @@
   /* Amount of source code compiled with the old codegen. */          \
   SC(total_old_codegen_source_size, V8.TotalOldCodegenSourceSize)     \
   /* Amount of source code compiled with the full codegen. */         \
-  SC(total_full_codegen_source_size, V8.TotalFullCodegenSourceSize)
+  SC(total_full_codegen_source_size, V8.TotalFullCodegenSourceSize)   \
+  /* Number of contexts created from scratch. */                      \
+  SC(contexts_created_from_scratch, V8.ContextsCreatedFromScratch)    \
+  /* Number of contexts created by partial snapshot. */               \
+  SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot)
+
 
 
 #define STATS_COUNTER_LIST_2(SC)                                    \
diff --git a/src/v8.cc b/src/v8.cc
index 3bec827..3953361 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -114,8 +114,11 @@
 
   OProfileAgent::Initialize();
 
-  if (FLAG_log_code) {
+  // If we are deserializing, log non-function code objects and compiled
+  // functions found in the snapshot.
+  if (des != NULL && FLAG_log_code) {
     HandleScope scope;
+    LOG(LogCodeObjects());
     LOG(LogCompiledFunctions());
   }
 
diff --git a/src/v8natives.js b/src/v8natives.js
index 7475065..5a47211 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -307,7 +307,7 @@
 
 // ES5 8.10.4
 function FromPropertyDescriptor(desc) {
-  if(IS_UNDEFINED(desc)) return desc;
+  if (IS_UNDEFINED(desc)) return desc;
   var obj = new $Object();
   if (IsDataDescriptor(desc)) {
     obj.value = desc.getValue();
@@ -333,7 +333,6 @@
     desc.setEnumerable(ToBoolean(obj.enumerable));
   }
 
-
   if ("configurable" in obj) {
     desc.setConfigurable(ToBoolean(obj.configurable));
   }
@@ -377,7 +376,9 @@
   this.writable_ = false;
   this.hasWritable_ = false;
   this.enumerable_ = false;
+  this.hasEnumerable_ = false;
   this.configurable_ = false;
+  this.hasConfigurable_ = false;
   this.get_ = void 0;
   this.hasGetter_ = false;
   this.set_ = void 0;
@@ -396,8 +397,14 @@
 }
 
 
+PropertyDescriptor.prototype.hasValue = function() {
+  return this.hasValue_;
+}
+
+
 PropertyDescriptor.prototype.setEnumerable = function(enumerable) {
   this.enumerable_ = enumerable;
+  this.hasEnumerable_ = true;
 }
 
 
@@ -406,6 +413,11 @@
 }
 
 
+PropertyDescriptor.prototype.hasEnumerable = function() {
+  return this.hasEnumerable_;
+}
+
+
 PropertyDescriptor.prototype.setWritable = function(writable) {
   this.writable_ = writable;
   this.hasWritable_ = true;
@@ -419,6 +431,12 @@
 
 PropertyDescriptor.prototype.setConfigurable = function(configurable) {
   this.configurable_ = configurable;
+  this.hasConfigurable_ = true;
+}
+
+
+PropertyDescriptor.prototype.hasConfigurable = function() {
+  return this.hasConfigurable_;
 }
 
 
@@ -438,6 +456,11 @@
 }
 
 
+PropertyDescriptor.prototype.hasGetter = function() {
+  return this.hasGetter_;
+}
+
+
 PropertyDescriptor.prototype.setSet = function(set) {
   this.set_ = set;
   this.hasSetter_ = true;
@@ -449,6 +472,12 @@
 }
 
 
+PropertyDescriptor.prototype.hasSetter = function() {
+  return this.hasSetter_;
+}
+
+
+
 // ES5 section 8.12.1.
 function GetOwnProperty(obj, p) {
   var desc = new PropertyDescriptor();
@@ -458,8 +487,7 @@
   //  obj is an accessor [true, Get, Set, Enumerable, Configurable]
   var props = %GetOwnProperty(ToObject(obj), ToString(p));
 
-  if (IS_UNDEFINED(props))
-    return void 0;
+  if (IS_UNDEFINED(props)) return void 0;
 
   // This is an accessor
   if (props[0]) {
@@ -476,16 +504,89 @@
 }
 
 
-// ES5 8.12.9.  This version cannot cope with the property p already
-// being present on obj.
+// ES5 section 8.12.2.
+function GetProperty(obj, p) {
+  var prop = GetOwnProperty(obj, p);
+  if (!IS_UNDEFINED(prop)) return prop;
+  var proto = obj.__proto__;
+  if (IS_NULL(proto)) return void 0;
+  return GetProperty(proto, p);
+}
+
+
+// ES5 section 8.12.6
+function HasProperty(obj, p) {
+  var desc = GetProperty(obj, p);
+  return IS_UNDEFINED(desc) ? false : true;
+}
+
+
+// ES5 8.12.9.  
 function DefineOwnProperty(obj, p, desc, should_throw) {
-  var flag = desc.isEnumerable() ? 0 : DONT_ENUM;
-  if (IsDataDescriptor(desc)) {
-    flag |= desc.isWritable() ? 0 : (DONT_DELETE | READ_ONLY);
-    %SetProperty(obj, p, desc.getValue(), flag);
+  var current = GetOwnProperty(obj, p);
+  var extensible = %IsExtensible(ToObject(obj));
+
+  // Error handling according to spec.
+  // Step 3
+  if (IS_UNDEFINED(current) && !extensible)
+    throw MakeTypeError("define_disallowed", ["defineProperty"]);
+
+  if (!IS_UNDEFINED(current) && !current.isConfigurable()) {
+    // Step 7
+    if (desc.isConfigurable() ||  desc.isEnumerable() != current.isEnumerable())
+      throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+    // Step 9
+    if (IsDataDescriptor(current) != IsDataDescriptor(desc))
+      throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+    // Step 10
+    if (IsDataDescriptor(current) && IsDataDescriptor(desc)) {
+      if (!current.isWritable() && desc.isWritable())
+        throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+      if (!current.isWritable() && desc.hasValue() &&
+          !SameValue(desc.getValue(), current.getValue())) {
+        throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+      }
+    }
+    // Step 11
+    if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
+      if (desc.hasSetter() && !SameValue(desc.getSet(), current.getSet())){
+        throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+      }
+      if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet()))
+        throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+    }
+  }
+
+  // Compute the attribute flags.  Enumerable and configurable apply to both
+  // kinds of descriptor; writable is only sent with a data descriptor.
+  // Take special care if enumerable and configurable are not defined on
+  // desc (we need to preserve the existing values from current).
+  var flag = NONE;
+  if (desc.hasEnumerable()) {
+    flag |= desc.isEnumerable() ? 0 : DONT_ENUM;
+  } else if (!IS_UNDEFINED(current)) {
+    flag |= current.isEnumerable() ? 0 : DONT_ENUM;
   } else {
-    if (IS_FUNCTION(desc.getGet())) %DefineAccessor(obj, p, GETTER, desc.getGet(), flag);
-    if (IS_FUNCTION(desc.getSet())) %DefineAccessor(obj, p, SETTER, desc.getSet(), flag);
+    flag |= DONT_ENUM;
+  }
+
+  if (desc.hasConfigurable()) {
+    flag |= desc.isConfigurable() ? 0 : DONT_DELETE;
+  } else if (!IS_UNDEFINED(current)) {
+    flag |= current.isConfigurable() ? 0 : DONT_DELETE;
+  } else {
+    flag |= DONT_DELETE;
+  }
+
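+  // A descriptor that is neither a data nor an accessor descriptor (a
+  // generic descriptor) is treated as a data descriptor here.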
+  if (IsDataDescriptor(desc) || IsGenericDescriptor(desc)) {
+    flag |= desc.isWritable() ? 0 : READ_ONLY;
+    %DefineOrRedefineDataProperty(obj, p, desc.getValue(), flag);
+  } else {
+    if (desc.hasGetter() && IS_FUNCTION(desc.getGet())) {
+       %DefineOrRedefineAccessorProperty(obj, p, GETTER, desc.getGet(), flag);
+    }
+    if (desc.hasSetter() && IS_FUNCTION(desc.getSet())) {
+      %DefineOrRedefineAccessorProperty(obj, p, SETTER, desc.getSet(), flag);
+    }
   }
   return true;
 }
@@ -558,10 +659,21 @@
 }
 
 
-// ES5 section 15.2.3.7.  This version cannot cope with the properies already
-// being present on obj.  Therefore it is not exposed as
-// Object.defineProperties yet.
+// ES5 section 15.2.3.6.
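+// For example, Object.defineProperty(o, "x", {value: 1, writable: false})
+// creates (or redefines) "x" as a read-only data property of o.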
+function ObjectDefineProperty(obj, p, attributes) {
+  if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj))
+    throw MakeTypeError("obj_ctor_property_non_object", ["defineProperty"]);
+  var name = ToString(p);
+  var desc = ToPropertyDescriptor(attributes);
+  DefineOwnProperty(obj, name, desc, true);
+  return obj;
+}
+
+
+// ES5 section 15.2.3.7.
 function ObjectDefineProperties(obj, properties) {
+  if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj))
+    throw MakeTypeError("obj_ctor_property_non_object", ["defineProperties"]);
   var props = ToObject(properties);
   var key_values = [];
   for (var key in props) {
@@ -577,6 +689,7 @@
     var desc = key_values[i + 1];
     DefineOwnProperty(obj, key, desc, true);
   }
+  return obj;
 }
 
 
@@ -611,6 +724,8 @@
   InstallFunctions($Object, DONT_ENUM, $Array(
     "keys", ObjectKeys,
     "create", ObjectCreate,
+    "defineProperty", ObjectDefineProperty,
+    "defineProperties", ObjectDefineProperties,
     "getPrototypeOf", ObjectGetPrototypeOf,
     "getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor,
     "getOwnPropertyNames", ObjectGetOwnPropertyNames
diff --git a/src/version.cc b/src/version.cc
index aea1a3a..a200861 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     2
 #define MINOR_VERSION     1
-#define BUILD_NUMBER      1 
+#define BUILD_NUMBER      1
 #define PATCH_LEVEL       0
 #define CANDIDATE_VERSION true
 
diff --git a/src/virtual-frame.h b/src/virtual-frame.h
index 0bf0ca2..220823e 100644
--- a/src/virtual-frame.h
+++ b/src/virtual-frame.h
@@ -37,6 +37,8 @@
 #include "x64/virtual-frame-x64.h"
 #elif V8_TARGET_ARCH_ARM
 #include "arm/virtual-frame-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/virtual-frame-mips.h"
 #else
 #error Unsupported target architecture.
 #endif
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 9cfe98a..1b6874e 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -265,7 +265,7 @@
 
 
 // -----------------------------------------------------------------------------
-// Implementation of Assembler
+// Implementation of Assembler.
 
 #ifdef GENERATED_CODE_COVERAGE
 static void InitCoverageLog();
@@ -276,7 +276,7 @@
 Assembler::Assembler(void* buffer, int buffer_size)
     : code_targets_(100) {
   if (buffer == NULL) {
-    // do our own buffer management
+    // Do our own buffer management.
     if (buffer_size <= kMinimalBufferSize) {
       buffer_size = kMinimalBufferSize;
 
@@ -293,7 +293,7 @@
     buffer_size_ = buffer_size;
     own_buffer_ = true;
   } else {
-    // use externally provided buffer instead
+    // Use externally provided buffer instead.
     ASSERT(buffer_size > 0);
     buffer_ = static_cast<byte*>(buffer);
     buffer_size_ = buffer_size;
@@ -309,7 +309,7 @@
   }
 #endif
 
-  // setup buffer pointers
+  // Setup buffer pointers.
   ASSERT(buffer_ != NULL);
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -337,11 +337,10 @@
 
 
 void Assembler::GetCode(CodeDesc* desc) {
-  // finalize code
-  // (at this point overflow() may be true, but the gap ensures that
-  // we are still not overlapping instructions and relocation info)
-  ASSERT(pc_ <= reloc_info_writer.pos());  // no overlap
-  // setup desc
+  // Finalize code (at this point overflow() may be true, but the gap ensures
+  // that we are still not overlapping instructions and relocation info).
+  ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap.
+  // Setup code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
   desc->instr_size = pc_offset();
@@ -370,7 +369,7 @@
     int current = L->pos();
     int next = long_at(current);
     while (next != current) {
-      // relative address, relative to point after address
+      // Relative address, relative to point after address.
       int imm32 = pos - (current + sizeof(int32_t));
       long_at_put(current, imm32);
       current = next;
@@ -390,10 +389,10 @@
 
 
 void Assembler::GrowBuffer() {
-  ASSERT(buffer_overflow());  // should not call this otherwise
+  ASSERT(buffer_overflow());
   if (!own_buffer_) FATAL("external code buffer is too small");
 
-  // compute new buffer size
+  // Compute new buffer size.
   CodeDesc desc;  // the new buffer
   if (buffer_size_ < 4*KB) {
     desc.buffer_size = 4*KB;
@@ -407,7 +406,7 @@
     V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
   }
 
-  // setup new buffer
+  // Setup new buffer.
   desc.buffer = NewArray<byte>(desc.buffer_size);
   desc.instr_size = pc_offset();
   desc.reloc_size =
@@ -419,7 +418,7 @@
   memset(desc.buffer, 0xCC, desc.buffer_size);
 #endif
 
-  // copy the data
+  // Copy the data.
   intptr_t pc_delta = desc.buffer - buffer_;
   intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
       (buffer_ + buffer_size_);
@@ -427,7 +426,7 @@
   memmove(rc_delta + reloc_info_writer.pos(),
           reloc_info_writer.pos(), desc.reloc_size);
 
-  // switch buffers
+  // Switch buffers.
   if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
     spare_buffer_ = buffer_;
   } else {
@@ -442,7 +441,7 @@
   reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                                reloc_info_writer.last_pc() + pc_delta);
 
-  // relocate runtime entries
+  // Relocate runtime entries.
   for (RelocIterator it(desc); !it.done(); it.next()) {
     RelocInfo::Mode rmode = it.rinfo()->rmode();
     if (rmode == RelocInfo::INTERNAL_REFERENCE) {
@@ -472,7 +471,7 @@
 }
 
 
-// Assembler Instruction implementations
+// Assembler Instruction implementations.
 
 void Assembler::arithmetic_op(byte opcode, Register reg, const Operand& op) {
   EnsureSpace ensure_space(this);
@@ -756,7 +755,7 @@
 void Assembler::call(Label* L) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  // 1110 1000 #32-bit disp
+  // 1110 1000 #32-bit disp.
   emit(0xE8);
   if (L->is_bound()) {
     int offset = L->pos() - pc_offset() - sizeof(int32_t);
@@ -777,7 +776,7 @@
 void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  // 1110 1000 #32-bit disp
+  // 1110 1000 #32-bit disp.
   emit(0xE8);
   emit_code_target(target, rmode);
 }
@@ -786,7 +785,7 @@
 void Assembler::call(Register adr) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  // Opcode: FF /2 r64
+  // Opcode: FF /2 r64.
   if (adr.high_bit()) {
     emit_rex_64(adr);
   }
@@ -798,7 +797,7 @@
 void Assembler::call(const Operand& op) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  // Opcode: FF /2 m64
+  // Opcode: FF /2 m64.
   emit_rex_64(op);
   emit(0xFF);
   emit_operand(2, op);
@@ -829,7 +828,7 @@
   ASSERT(cc >= 0);  // Use mov for unconditional moves.
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  // Opcode: REX.W 0f 40 + cc /r
+  // Opcode: REX.W 0f 40 + cc /r.
   emit_rex_64(dst, src);
   emit(0x0f);
   emit(0x40 + cc);
@@ -846,7 +845,7 @@
   ASSERT(cc >= 0);
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  // Opcode: REX.W 0f 40 + cc /r
+  // Opcode: REX.W 0f 40 + cc /r.
   emit_rex_64(dst, src);
   emit(0x0f);
   emit(0x40 + cc);
@@ -863,7 +862,7 @@
   ASSERT(cc >= 0);
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  // Opcode: 0f 40 + cc /r
+  // Opcode: 0f 40 + cc /r.
   emit_optional_rex_32(dst, src);
   emit(0x0f);
   emit(0x40 + cc);
@@ -880,7 +879,7 @@
   ASSERT(cc >= 0);
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  // Opcode: 0f 40 + cc /r
+  // Opcode: 0f 40 + cc /r.
   emit_optional_rex_32(dst, src);
   emit(0x0f);
   emit(0x40 + cc);
@@ -1110,17 +1109,17 @@
     int offs = L->pos() - pc_offset();
     ASSERT(offs <= 0);
     if (is_int8(offs - short_size)) {
-      // 0111 tttn #8-bit disp
+      // 0111 tttn #8-bit disp.
       emit(0x70 | cc);
       emit((offs - short_size) & 0xFF);
     } else {
-      // 0000 1111 1000 tttn #32-bit disp
+      // 0000 1111 1000 tttn #32-bit disp.
       emit(0x0F);
       emit(0x80 | cc);
       emitl(offs - long_size);
     }
   } else if (L->is_linked()) {
-    // 0000 1111 1000 tttn #32-bit disp
+    // 0000 1111 1000 tttn #32-bit disp.
     emit(0x0F);
     emit(0x80 | cc);
     emitl(L->pos());
@@ -1142,7 +1141,7 @@
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   ASSERT(is_uint4(cc));
-  // 0000 1111 1000 tttn #32-bit disp
+  // 0000 1111 1000 tttn #32-bit disp.
   emit(0x0F);
   emit(0x80 | cc);
   emit_code_target(target, rmode);
@@ -1156,21 +1155,21 @@
     int offs = L->pos() - pc_offset() - 1;
     ASSERT(offs <= 0);
     if (is_int8(offs - sizeof(int8_t))) {
-      // 1110 1011 #8-bit disp
+      // 1110 1011 #8-bit disp.
       emit(0xEB);
       emit((offs - sizeof(int8_t)) & 0xFF);
     } else {
-      // 1110 1001 #32-bit disp
+      // 1110 1001 #32-bit disp.
       emit(0xE9);
       emitl(offs - sizeof(int32_t));
     }
   } else  if (L->is_linked()) {
-    // 1110 1001 #32-bit disp
+    // 1110 1001 #32-bit disp.
     emit(0xE9);
     emitl(L->pos());
     L->link_to(pc_offset() - sizeof(int32_t));
   } else {
-    // 1110 1001 #32-bit disp
+    // 1110 1001 #32-bit disp.
     ASSERT(L->is_unused());
     emit(0xE9);
     int32_t current = pc_offset();
@@ -1183,7 +1182,7 @@
 void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  // 1110 1001 #32-bit disp
+  // 1110 1001 #32-bit disp.
   emit(0xE9);
   emit_code_target(target, rmode);
 }
@@ -1192,7 +1191,7 @@
 void Assembler::jmp(Register target) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  // Opcode FF/4 r64
+  // Opcode FF/4 r64.
   if (target.high_bit()) {
     emit_rex_64(target);
   }
@@ -1204,7 +1203,7 @@
 void Assembler::jmp(const Operand& src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  // Opcode FF/4 m64
+  // Opcode FF/4 m64.
   emit_optional_rex_32(src);
   emit(0xFF);
   emit_operand(0x4, src);
@@ -1413,10 +1412,8 @@
 }
 
 
-/*
- * Loads the ip-relative location of the src label into the target
- * location (as a 32-bit offset sign extended to 64-bit).
- */
+// Loads the ip-relative location of the src label into the target location
+// (as a 32-bit offset sign extended to 64-bit).
 void Assembler::movl(const Operand& dst, Label* src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2006,7 +2003,7 @@
 }
 
 
-// FPU instructions
+// FPU instructions.
 
 
 void Assembler::fld(int i) {
@@ -2377,7 +2374,7 @@
   emit(b2 + i);
 }
 
-// SSE 2 operations
+// SSE 2 operations.
 
 void Assembler::movsd(const Operand& dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
@@ -2527,7 +2524,7 @@
 }
 
 
-// Relocation information implementations
+// Relocation information implementations.
 
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
   ASSERT(rmode != RelocInfo::NONE);
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 3f2aef0..64fbd88 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -113,8 +113,8 @@
     return code_ & 0x7;
   }
 
-  // (unfortunately we can't make this private in a struct when initializing
-  // by assignment.)
+  // Unfortunately we can't make this private in a struct when initializing
+  // by assignment.
   int code_;
 };
 
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 685c928..ee8f251 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -246,14 +246,10 @@
 // -----------------------------------------------------------------------------
 // CodeGenerator implementation.
 
-CodeGenerator::CodeGenerator(MacroAssembler* masm,
-                             Handle<Script> script,
-                             bool is_eval)
-    : is_eval_(is_eval),
-      script_(script),
-      deferred_(8),
+CodeGenerator::CodeGenerator(MacroAssembler* masm)
+    : deferred_(8),
       masm_(masm),
-      scope_(NULL),
+      info_(NULL),
       frame_(NULL),
       allocator_(NULL),
       state_(NULL),
@@ -263,6 +259,9 @@
 }
 
 
+Scope* CodeGenerator::scope() { return info_->function()->scope(); }
+
+
 void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.  The inevitable call
   // will sync frame elements to memory anyway, so we do it eagerly to
@@ -278,16 +277,12 @@
 }
 
 
-void CodeGenerator::Generate(FunctionLiteral* function,
-                             Mode mode,
-                             CompilationInfo* info) {
+void CodeGenerator::Generate(CompilationInfo* info, Mode mode) {
   // Record the position for debugging purposes.
-  CodeForFunctionPosition(function);
-  ZoneList<Statement*>* body = function->body();
+  CodeForFunctionPosition(info->function());
 
   // Initialize state.
-  ASSERT(scope_ == NULL);
-  scope_ = function->scope();
+  info_ = info;
   ASSERT(allocator_ == NULL);
   RegisterAllocator register_allocator(this);
   allocator_ = &register_allocator;
@@ -302,7 +297,7 @@
 
 #ifdef DEBUG
   if (strlen(FLAG_stop_at) > 0 &&
-      function->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+      info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
     frame_->SpillAll();
     __ int3();
   }
@@ -328,7 +323,7 @@
       frame_->AllocateStackSlots();
 
       // Allocate the local context if needed.
-      int heap_slots = scope_->num_heap_slots();
+      int heap_slots = scope()->num_heap_slots();
       if (heap_slots > 0) {
         Comment cmnt(masm_, "[ allocate local context");
         // Allocate local context.
@@ -358,7 +353,6 @@
       // 3) don't copy parameter operand code from SlotOperand!
       {
         Comment cmnt2(masm_, "[ copy context parameters into .context");
-
         // Note that iteration order is relevant here! If we have the same
         // parameter twice (e.g., function (x, y, x)), and that parameter
         // needs to be copied into the context, it must be the last argument
@@ -367,15 +361,15 @@
         // order: such a parameter is copied repeatedly into the same
         // context location and thus the last value is what is seen inside
         // the function.
-        for (int i = 0; i < scope_->num_parameters(); i++) {
-          Variable* par = scope_->parameter(i);
+        for (int i = 0; i < scope()->num_parameters(); i++) {
+          Variable* par = scope()->parameter(i);
           Slot* slot = par->slot();
           if (slot != NULL && slot->type() == Slot::CONTEXT) {
             // The use of SlotOperand below is safe in unspilled code
             // because the slot is guaranteed to be a context slot.
             //
             // There are no parameters in the global scope.
-            ASSERT(!scope_->is_global_scope());
+            ASSERT(!scope()->is_global_scope());
             frame_->PushParameterAt(i);
             Result value = frame_->Pop();
             value.ToRegister();
@@ -403,9 +397,9 @@
       }
 
       // Initialize ThisFunction reference if present.
-      if (scope_->is_function_scope() && scope_->function() != NULL) {
+      if (scope()->is_function_scope() && scope()->function() != NULL) {
         frame_->Push(Factory::the_hole_value());
-        StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+        StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
       }
     } else {
       // When used as the secondary compiler for splitting, rbp, rsi,
@@ -423,12 +417,12 @@
     // Generate code to 'execute' declarations and initialize functions
     // (source elements). In case of an illegal redeclaration we need to
     // handle that instead of processing the declarations.
-    if (scope_->HasIllegalRedeclaration()) {
+    if (scope()->HasIllegalRedeclaration()) {
       Comment cmnt(masm_, "[ illegal redeclarations");
-      scope_->VisitIllegalRedeclaration(this);
+      scope()->VisitIllegalRedeclaration(this);
     } else {
       Comment cmnt(masm_, "[ declarations");
-      ProcessDeclarations(scope_->declarations());
+      ProcessDeclarations(scope()->declarations());
       // Bail out if a stack-overflow exception occurred when processing
       // declarations.
       if (HasStackOverflow()) return;
@@ -443,7 +437,7 @@
     // Compile the body of the function in a vanilla state. Don't
     // bother compiling all the code if the scope has an illegal
     // redeclaration.
-    if (!scope_->HasIllegalRedeclaration()) {
+    if (!scope()->HasIllegalRedeclaration()) {
       Comment cmnt(masm_, "[ function body");
 #ifdef DEBUG
       bool is_builtin = Bootstrapper::IsActive();
@@ -454,14 +448,14 @@
         // Ignore the return value.
       }
 #endif
-      VisitStatements(body);
+      VisitStatements(info->function()->body());
 
       // Handle the return from the function.
       if (has_valid_frame()) {
         // If there is a valid frame, control flow can fall off the end of
         // the body.  In that case there is an implicit return statement.
         ASSERT(!function_return_is_shadowed_);
-        CodeForReturnPosition(function);
+        CodeForReturnPosition(info->function());
         frame_->PrepareForReturn();
         Result undefined(Factory::undefined_value());
         if (function_return_.is_bound()) {
@@ -504,7 +498,6 @@
   // There is no need to delete the register allocator, it is a
   // stack-allocated local.
   allocator_ = NULL;
-  scope_ = NULL;
 }
 
 void CodeGenerator::GenerateReturnSequence(Result* return_value) {
@@ -527,7 +520,7 @@
   // Leave the frame and return popping the arguments and the
   // receiver.
   frame_->Exit();
-  masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
+  masm_->ret((scope()->num_parameters() + 1) * kPointerSize);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Add padding that will be overwritten by a debugger breakpoint.
   // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
@@ -695,7 +688,7 @@
   // Load the receiver and the existing arguments object onto the
   // expression stack. Avoid allocating the arguments object here.
   Load(receiver);
-  LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+  LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
 
   // Emit the source position information after having loaded the
   // receiver and the arguments.
@@ -773,8 +766,8 @@
       __ j(equal, &adapted);
 
       // No arguments adaptor frame. Copy fixed number of arguments.
-      __ movq(rax, Immediate(scope_->num_parameters()));
-      for (int i = 0; i < scope_->num_parameters(); i++) {
+      __ movq(rax, Immediate(scope()->num_parameters()));
+      for (int i = 0; i < scope()->num_parameters(); i++) {
         __ push(frame_->ParameterAt(i));
       }
       __ jmp(&invoke);
@@ -2263,7 +2256,7 @@
 
   // Build the function boilerplate and instantiate it.
   Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(node, script_, this);
+      Compiler::BuildBoilerplate(node, script(), this);
   // Check for stack-overflow exception.
   if (HasStackOverflow()) return;
   InstantiateBoilerplate(boilerplate);
@@ -3610,7 +3603,7 @@
   Load(args->at(0));
   Result key = frame_->Pop();
   // Explicitly create a constant result.
-  Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
+  Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
   // Call the shared stub to get to arguments[key].
   ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
   Result result = frame_->CallStub(&stub, &key, &count);
@@ -3719,7 +3712,7 @@
   ASSERT(args->length() == 0);
   // ArgumentsAccessStub takes the parameter count as an input argument
   // in register eax.  Create a constant result for it.
-  Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
+  Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
   // Call the shared stub to get to the arguments.length.
   ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
   Result result = frame_->CallStub(&stub, &count);
@@ -4789,13 +4782,13 @@
 }
 
 
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() const {
-  if (scope_->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
-  ASSERT(scope_->arguments_shadow() != NULL);
+ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
+  if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
+  ASSERT(scope()->arguments_shadow() != NULL);
   // We don't want to do lazy arguments allocation for functions that
   // have heap-allocated contexts, because it interfers with the
   // uninitialized const tracking in the context objects.
-  return (scope_->num_heap_slots() > 0)
+  return (scope()->num_heap_slots() > 0)
       ? EAGER_ARGUMENTS_ALLOCATION
       : LAZY_ARGUMENTS_ALLOCATION;
 }
@@ -4815,14 +4808,14 @@
     ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
     frame_->PushFunction();
     frame_->PushReceiverSlotAddress();
-    frame_->Push(Smi::FromInt(scope_->num_parameters()));
+    frame_->Push(Smi::FromInt(scope()->num_parameters()));
     Result result = frame_->CallStub(&stub, 3);
     frame_->Push(&result);
   }
 
 
-  Variable* arguments = scope_->arguments()->var();
-  Variable* shadow = scope_->arguments_shadow()->var();
+  Variable* arguments = scope()->arguments()->var();
+  Variable* shadow = scope()->arguments_shadow()->var();
   ASSERT(arguments != NULL && arguments->slot() != NULL);
   ASSERT(shadow != NULL && shadow->slot() != NULL);
   JumpTarget done;
@@ -4831,7 +4824,7 @@
     // We have to skip storing into the arguments slot if it has
     // already been written to. This can happen if the a function
     // has a local variable named 'arguments'.
-    LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+    LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
     Result probe = frame_->Pop();
     if (probe.is_constant()) {
       // We have to skip updating the arguments object if it has been
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index a758e73..8fbbe5a 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -305,19 +305,15 @@
 
   // Takes a function literal, generates code for it. This function should only
   // be called by compiler.cc.
-  static Handle<Code> MakeCode(FunctionLiteral* fun,
-                               Handle<Script> script,
-                               bool is_eval,
-                               CompilationInfo* info);
+  static Handle<Code> MakeCode(CompilationInfo* info);
 
   // Printing of AST, etc. as requested by flags.
-  static void MakeCodePrologue(FunctionLiteral* fun);
+  static void MakeCodePrologue(CompilationInfo* info);
 
   // Allocate and install the code.
-  static Handle<Code> MakeCodeEpilogue(FunctionLiteral* fun,
-                                       MacroAssembler* masm,
+  static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
                                        Code::Flags flags,
-                                       Handle<Script> script);
+                                       CompilationInfo* info);
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
@@ -328,7 +324,7 @@
   // Accessors
   MacroAssembler* masm() { return masm_; }
   VirtualFrame* frame() const { return frame_; }
-  Handle<Script> script() { return script_; }
+  inline Handle<Script> script();
 
   bool has_valid_frame() const { return frame_ != NULL; }
 
@@ -352,16 +348,15 @@
 
  private:
   // Construction/Destruction
-  CodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval);
+  explicit CodeGenerator(MacroAssembler* masm);
 
   // Accessors
-  Scope* scope() const { return scope_; }
+  inline bool is_eval();
+  Scope* scope();
 
   // Generating deferred code.
   void ProcessDeferred();
 
-  bool is_eval() { return is_eval_; }
-
   // State
   ControlDestination* destination() const { return state_->destination(); }
 
@@ -390,7 +385,7 @@
   void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
 
   // Main code generation function
-  void Generate(FunctionLiteral* fun, Mode mode, CompilationInfo* info);
+  void Generate(CompilationInfo* info, Mode mode);
 
   // Generate the return sequence code.  Should be called no more than
   // once per compiled function, immediately after binding the return
@@ -398,7 +393,7 @@
   void GenerateReturnSequence(Result* return_value);
 
   // Returns the arguments allocation mode.
-  ArgumentsAllocationMode ArgumentsMode() const;
+  ArgumentsAllocationMode ArgumentsMode();
 
   // Store the arguments object and allocate it if necessary.
   Result StoreArgumentsObject(bool initial);
@@ -604,15 +599,14 @@
   bool HasValidEntryRegisters();
 #endif
 
-  bool is_eval_;  // Tells whether code is generated for eval.
-  Handle<Script> script_;
   ZoneList<DeferredCode*> deferred_;
 
   // Assembler
   MacroAssembler* masm_;  // to generate code
 
+  CompilationInfo* info_;
+
   // Code generation state
-  Scope* scope_;
   VirtualFrame* frame_;
   RegisterAllocator* allocator_;
   CodeGenState* state_;
diff --git a/src/x64/fast-codegen-x64.cc b/src/x64/fast-codegen-x64.cc
index 12b5653..c6e7be2 100644
--- a/src/x64/fast-codegen-x64.cc
+++ b/src/x64/fast-codegen-x64.cc
@@ -37,7 +37,7 @@
 
 void FastCodeGenerator::EmitLoadReceiver(Register reg) {
   // Offset 2 is due to return address and saved frame pointer.
-  int index = 2 + function()->scope()->num_parameters();
+  int index = 2 + scope()->num_parameters();
   __ movq(reg, Operand(rbp, index * kPointerSize));
 }
 
@@ -48,41 +48,47 @@
     PrintF("MapCheck(this)\n");
   }
 
-  EmitLoadReceiver(rdx);
-  __ JumpIfSmi(rdx, bailout());
-
-  ASSERT(has_receiver() && receiver()->IsHeapObject());
-  Handle<HeapObject> object = Handle<HeapObject>::cast(receiver());
+  ASSERT(info()->has_receiver() && info()->receiver()->IsHeapObject());
+  Handle<HeapObject> object = Handle<HeapObject>::cast(info()->receiver());
   Handle<Map> map(object->map());
-  __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), map);
-  __ j(not_equal, bailout());
+
+  EmitLoadReceiver(rdx);
+  __ CheckMap(rdx, map, bailout(), false);
 }
 
 
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
-  // Compile global variable accesses as load IC calls.  The only live
-  // registers are rsi (context) and possibly rdx (this).  Both are also
-  // saved in the stack and rsi is preserved by the call.
-  __ push(CodeGenerator::GlobalObject());
-  __ Move(rcx, name);
-  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-  if (has_this_properties()) {
-    // Restore this.
-    EmitLoadReceiver(rdx);
-  } else {
-    __ nop();  // Not test rax, indicates IC has no inlined code at call site.
+void FastCodeGenerator::EmitGlobalMapCheck() {
+  Comment cmnt(masm(), ";; GlobalMapCheck");
+  if (FLAG_print_ir) {
+    PrintF(";; GlobalMapCheck()");
+  }
+
+  ASSERT(info()->has_global_object());
+  Handle<Map> map(info()->global_object()->map());
+
+  __ movq(rbx, CodeGenerator::GlobalObject());
+  __ CheckMap(rbx, map, bailout(), true);
+}
+
+
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> cell) {
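+  // Load the global variable directly from its property cell.  A DontDelete
+  // cell never contains the hole, which the debug check below relies on.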
+  ASSERT(cell->IsJSGlobalPropertyCell());
+  __ Move(rax, cell);
+  __ movq(rax, FieldOperand(rax, JSGlobalPropertyCell::kValueOffset));
+  if (FLAG_debug_code) {
+    __ Cmp(rax, Factory::the_hole_value());
+    __ Check(not_equal, "DontDelete cells can't contain the hole");
   }
 }
 
 
 void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
   LookupResult lookup;
-  receiver()->Lookup(*name, &lookup);
+  info()->receiver()->Lookup(*name, &lookup);
 
-  ASSERT(lookup.holder() == *receiver());
+  ASSERT(lookup.holder() == *info()->receiver());
   ASSERT(lookup.type() == FIELD);
-  Handle<Map> map(Handle<HeapObject>::cast(receiver())->map());
+  Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
   int index = lookup.GetFieldIndex() - map->inobject_properties();
   int offset = index * kPointerSize;
 
@@ -102,11 +108,9 @@
 }
 
 
-void FastCodeGenerator::Generate(FunctionLiteral* fun, CompilationInfo* info) {
-  ASSERT(function_ == NULL);
+void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
   ASSERT(info_ == NULL);
-  function_ = fun;
-  info_ = info;
+  info_ = compilation_info;
 
   // Save the caller's frame pointer and set up our own.
   Comment prologue_cmnt(masm(), ";; Prologue");
@@ -118,9 +122,13 @@
   // point.
 
   // Receiver (this) is allocated to rdx if there are this properties.
-  if (has_this_properties()) EmitReceiverMapCheck();
+  if (info()->has_this_properties()) EmitReceiverMapCheck();
 
-  VisitStatements(fun->body());
+  // If there is a global variable access check if the global object
+  // is the same as at lazy-compilation time.
+  if (info()->has_globals()) EmitGlobalMapCheck();
+
+  VisitStatements(info()->function()->body());
 
   Comment return_cmnt(masm(), ";; Return(<undefined>)");
   __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
@@ -128,7 +136,7 @@
   Comment epilogue_cmnt(masm(), ";; Epilogue");
   __ movq(rsp, rbp);
   __ pop(rbp);
-  __ ret((fun->scope()->num_parameters() + 1) * kPointerSize);
+  __ ret((scope()->num_parameters() + 1) * kPointerSize);
 
   __ bind(&bailout_);
 }
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index f5bbfaf..2e95c68 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -51,9 +51,10 @@
 //
 // The function builds a JS frame.  Please see JavaScriptFrameConstants in
 // frames-x64.h for its layout.
-void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
-  function_ = fun;
-  SetFunctionPosition(fun);
+void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+  ASSERT(info_ == NULL);
+  info_ = info;
+  SetFunctionPosition(function());
 
   if (mode == PRIMARY) {
     __ push(rbp);  // Caller's frame pointer.
@@ -62,7 +63,7 @@
     __ push(rdi);  // Callee's JS Function.
 
     { Comment cmnt(masm_, "[ Allocate locals");
-      int locals_count = fun->scope()->num_stack_slots();
+      int locals_count = scope()->num_stack_slots();
       if (locals_count == 1) {
         __ PushRoot(Heap::kUndefinedValueRootIndex);
       } else if (locals_count > 1) {
@@ -76,7 +77,7 @@
     bool function_in_register = true;
 
     // Possibly allocate a local context.
-    if (fun->scope()->num_heap_slots() > 0) {
+    if (scope()->num_heap_slots() > 0) {
       Comment cmnt(masm_, "[ Allocate local context");
       // Argument to NewContext is the function, which is still in rdi.
       __ push(rdi);
@@ -87,9 +88,9 @@
       __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
 
       // Copy any necessary parameters into the context.
-      int num_parameters = fun->scope()->num_parameters();
+      int num_parameters = scope()->num_parameters();
       for (int i = 0; i < num_parameters; i++) {
-        Slot* slot = fun->scope()->parameter(i)->slot();
+        Slot* slot = scope()->parameter(i)->slot();
         if (slot != NULL && slot->type() == Slot::CONTEXT) {
           int parameter_offset = StandardFrameConstants::kCallerSPOffset +
                                      (num_parameters - 1 - i) * kPointerSize;
@@ -108,7 +109,7 @@
     }
 
     // Possibly allocate an arguments object.
-    Variable* arguments = fun->scope()->arguments()->AsVariable();
+    Variable* arguments = scope()->arguments()->AsVariable();
     if (arguments != NULL) {
       // Arguments object must be allocated after the context object, in
       // case the "arguments" or ".arguments" variables are in the context.
@@ -119,10 +120,11 @@
         __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
       }
       // The receiver is just before the parameters on the caller's stack.
-      __ lea(rdx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
-                          fun->num_parameters() * kPointerSize));
+      int offset = scope()->num_parameters() * kPointerSize;
+      __ lea(rdx,
+             Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
       __ push(rdx);
-      __ Push(Smi::FromInt(fun->num_parameters()));
+      __ Push(Smi::FromInt(scope()->num_parameters()));
       // Arguments to ArgumentsAccessStub:
       //   function, receiver address, parameter count.
       // The stub will rewrite receiver and parameter count if the previous
@@ -133,13 +135,13 @@
       __ movq(rcx, rax);
       Move(arguments->slot(), rax, rbx, rdx);
       Slot* dot_arguments_slot =
-          fun->scope()->arguments_shadow()->AsVariable()->slot();
+          scope()->arguments_shadow()->AsVariable()->slot();
       Move(dot_arguments_slot, rcx, rbx, rdx);
     }
   }
 
   { Comment cmnt(masm_, "[ Declarations");
-    VisitDeclarations(fun->scope()->declarations());
+    VisitDeclarations(scope()->declarations());
   }
 
   { Comment cmnt(masm_, "[ Stack check");
@@ -157,14 +159,14 @@
 
   { Comment cmnt(masm_, "[ Body");
     ASSERT(loop_depth() == 0);
-    VisitStatements(fun->body());
+    VisitStatements(function()->body());
     ASSERT(loop_depth() == 0);
   }
 
   { Comment cmnt(masm_, "[ return <undefined>;");
     // Emit a 'return undefined' in case control fell off the end of the body.
     __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-    EmitReturnSequence(function_->end_position());
+    EmitReturnSequence(function()->end_position());
   }
 }
 
@@ -190,7 +192,7 @@
     // patch with the code required by the debugger.
     __ movq(rsp, rbp);
     __ pop(rbp);
-    __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
+    __ ret((scope()->num_parameters() + 1) * kPointerSize);
 #ifdef ENABLE_DEBUGGER_SUPPORT
     // Add padding that will be overwritten by a debugger breakpoint.  We
     // have just generated "movq rsp, rbp; pop rbp; ret k" with length 7
@@ -629,7 +631,7 @@
       return Operand(rbp, SlotOffset(slot));
     case Slot::CONTEXT: {
       int context_chain_length =
-          function_->scope()->ContextChainLength(slot->var()->scope());
+          scope()->ContextChainLength(slot->var()->scope());
       __ LoadContext(scratch, context_chain_length);
       return CodeGenerator::ContextOperand(scratch, slot->index());
     }
@@ -688,7 +690,7 @@
         // this specific context.
 
         // The variable in the decl always resides in the current context.
-        ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+        ASSERT_EQ(0, scope()->ContextChainLength(var->scope()));
         if (FLAG_debug_code) {
           // Check if we have the correct context pointer.
           __ movq(rbx,
@@ -767,7 +769,7 @@
   // Call the runtime to declare the globals.
   __ push(rsi);  // The context is the first argument.
   __ Push(pairs);
-  __ Push(Smi::FromInt(is_eval_ ? 1 : 0));
+  __ Push(Smi::FromInt(is_eval() ? 1 : 0));
   __ CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
 }
@@ -778,7 +780,7 @@
 
   // Build the function boilerplate and instantiate it.
   Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(expr, script_, this);
+      Compiler::BuildBoilerplate(expr, script(), this);
   if (HasStackOverflow()) return;
 
   ASSERT(boilerplate->IsBoilerplate());
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 28bfd2e..99a8c7d 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -151,22 +151,6 @@
 }
 
 
-// Helper function used to check that a value is either not an object
-// or is loaded if it is an object.
-static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
-                                           Register value) {
-  Label done;
-  // Check if the value is a Smi.
-  __ JumpIfSmi(value, &done);
-  // Check if the object has been loaded.
-  __ movq(kScratchRegister, FieldOperand(value, JSFunction::kMapOffset));
-  __ testb(FieldOperand(kScratchRegister, Map::kBitField2Offset),
-           Immediate(1 << Map::kNeedsLoading));
-  __ j(not_zero, miss);
-  __ bind(&done);
-}
-
-
 // One byte opcode for test eax,0xXXXXXXXX.
 static const byte kTestEaxByte = 0xA9;
 
@@ -390,7 +374,6 @@
                          rdx,
                          rax,
                          DICTIONARY_CHECK_DONE);
-  GenerateCheckNonObjectOrLoaded(masm, &slow, rcx);
   __ movq(rax, rcx);
   __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
   __ ret(0);
@@ -1053,10 +1036,6 @@
   // Check that the value is a JavaScript function.
   __ CmpObjectType(rdx, JS_FUNCTION_TYPE, rdx);
   __ j(not_equal, miss);
-  // Check that the function has been loaded.
-  __ testb(FieldOperand(rdx, Map::kBitField2Offset),
-           Immediate(1 << Map::kNeedsLoading));
-  __ j(not_zero, miss);
 
   // Patch the receiver with the global proxy if necessary.
   if (is_global_object) {
@@ -1267,7 +1246,6 @@
   // Search the dictionary placing the result in rax.
   __ bind(&probe);
   GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx, rcx, CHECK_DICTIONARY);
-  GenerateCheckNonObjectOrLoaded(masm, &miss, rax);
   __ ret(0);
 
   // Global object access: Check access rights.
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 96b45e8..56bbc20 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -1585,6 +1585,18 @@
 }
 
 
+void MacroAssembler::CheckMap(Register obj,
+                              Handle<Map> map,
+                              Label* fail,
+                              bool is_heap_object) {
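+  // Smis have no map, so bail out on a smi unless the caller has already
+  // established that obj is a heap object.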
+  if (!is_heap_object) {
+    JumpIfSmi(obj, fail);
+  }
+  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
+  j(not_equal, fail);
+}
+
+
 Condition MacroAssembler::IsObjectStringType(Register heap_object,
                                              Register map,
                                              Register instance_type) {
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 2913274..a975dca 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -460,6 +460,14 @@
   // Always use unsigned comparisons: above and below, not less and greater.
   void CmpInstanceType(Register map, InstanceType type);
 
+  // Check if the map of an object is equal to a specified map and
+  // branch to the label if not.  Skip the smi check if it is not required
+  // (i.e. the object is known to be a heap object).
+  void CheckMap(Register obj,
+                Handle<Map> map,
+                Label* fail,
+                bool is_heap_object);
+
   // Check if the object in register heap_object is a string. Afterwards the
   // register map contains the object map and the register instance_type
   // contains the instance_type. The registers map and instance_type can be the
@@ -712,8 +720,8 @@
   List<Unresolved> unresolved_;
   bool generating_stub_;
   bool allow_stub_calls_;
-  Handle<Object> code_object_;  // This handle will be patched with the code
-                                // object on installation.
+  // This handle will be patched with the code object on installation.
+  Handle<Object> code_object_;
 
   // Helper functions for generating invokes.
   void InvokePrologue(const ParameterCount& expected,
diff --git a/test/cctest/SConscript b/test/cctest/SConscript
index e6c81d8..acd567e 100644
--- a/test/cctest/SConscript
+++ b/test/cctest/SConscript
@@ -63,7 +63,10 @@
     'test-utils.cc',
     'test-version.cc'
   ],
-  'arch:arm':  ['test-assembler-arm.cc', 'test-disasm-arm.cc'],
+  'arch:arm':  [
+    'test-assembler-arm.cc',
+    'test-disasm-arm.cc'
+  ],
   'arch:ia32': [
     'test-assembler-ia32.cc',
     'test-disasm-ia32.cc',
@@ -72,6 +75,7 @@
   'arch:x64': ['test-assembler-x64.cc',
                'test-macro-assembler-x64.cc',
                'test-log-stack-tracer.cc'],
+  'arch:mips': ['test-assembler-mips.cc'],
   'os:linux':  ['test-platform-linux.cc'],
   'os:macos':  ['test-platform-macos.cc'],
   'os:nullos': ['test-platform-nullos.cc'],
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index a143cbd..363b0d7 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -38,7 +38,6 @@
 test-serialize/TestThatAlwaysFails: FAIL
 test-serialize/DependentTestThatAlwaysFails: FAIL
 
-
 [ $arch == arm ]
 
 # BUG(240): Test seems flaky on ARM.
@@ -52,3 +51,23 @@
 
 # BUG(355): Test crashes on ARM.
 test-log/ProfLazyMode: SKIP
+
+[ $arch == mips ]
+test-accessors: SKIP
+test-alloc: SKIP
+test-api: SKIP
+test-compiler: SKIP
+test-debug: SKIP
+test-decls: SKIP
+test-func-name-inference: SKIP
+test-heap: SKIP
+test-heap-profiler: SKIP
+test-log: SKIP
+test-log-utils: SKIP
+test-mark-compact: SKIP
+test-regexp: SKIP
+test-serialize: SKIP
+test-sockets: SKIP
+test-strings: SKIP
+test-threads: SKIP
+test-thread-termination: SKIP
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index f71b325..0a392eb 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -38,7 +38,7 @@
 #include "utils.h"
 #include "cctest.h"
 
-static const bool kLogThreading = false;
+static const bool kLogThreading = true;
 
 static bool IsNaN(double x) {
 #ifdef WIN32
@@ -2297,6 +2297,103 @@
   }
 }
 
+THREADED_TEST(DefinePropertyOnAPIAccessor) {
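+  // Check that Object.defineProperty can redefine a property backed by an
+  // API accessor, and that once the property is made non-configurable it
+  // cannot be redefined again.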
+  v8::HandleScope scope;
+  Local<ObjectTemplate> templ = ObjectTemplate::New();
+  templ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut"));
+  LocalContext context;
+  context->Global()->Set(v8_str("obj"), templ->NewInstance());
+
+  // Uses getOwnPropertyDescriptor to check the configurable status
+  Local<Script> script_desc
+    = Script::Compile(v8_str("var prop =Object.getOwnPropertyDescriptor( "
+                             "obj, 'x');"
+                             "prop.configurable;"));
+  Local<Value> result = script_desc->Run();
+  CHECK_EQ(result->BooleanValue(), true);
+
+  // Redefine get - but still configurable
+  Local<Script> script_define
+    = Script::Compile(v8_str("var desc = { get: function(){return 42; },"
+                             "            configurable: true };"
+                             "Object.defineProperty(obj, 'x', desc);"
+                             "obj.x"));
+  result = script_define->Run();
+  CHECK_EQ(result, v8_num(42));
+
+  // Check that the accessor is still configurable
+  result = script_desc->Run();
+  CHECK_EQ(result->BooleanValue(), true);
+
+  // Redefine to a non-configurable
+  script_define
+    = Script::Compile(v8_str("var desc = { get: function(){return 43; },"
+                             "             configurable: false };"
+                             "Object.defineProperty(obj, 'x', desc);"
+                             "obj.x"));
+  result = script_define->Run();
+  CHECK_EQ(result, v8_num(43));
+  result = script_desc->Run();
+  CHECK_EQ(result->BooleanValue(), false);
+
+  // Make sure that it is not possible to redefine again
+  v8::TryCatch try_catch;
+  result = script_define->Run();
+  CHECK(try_catch.HasCaught());
+  String::AsciiValue exception_value(try_catch.Exception());
+  CHECK_EQ(*exception_value,
+           "TypeError: Cannot redefine property: defineProperty");
+}
+
+THREADED_TEST(DefinePropertyOnDefineGetterSetter) {
+  v8::HandleScope scope;
+  Local<ObjectTemplate> templ = ObjectTemplate::New();
+  templ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut"));
+  LocalContext context;
+  context->Global()->Set(v8_str("obj"), templ->NewInstance());
+
+  Local<Script> script_desc = Script::Compile(v8_str("var prop ="
+                                    "Object.getOwnPropertyDescriptor( "
+                                    "obj, 'x');"
+                                    "prop.configurable;"));
+  Local<Value> result = script_desc->Run();
+  CHECK_EQ(result->BooleanValue(), true);
+
+  Local<Script> script_define =
+    Script::Compile(v8_str("var desc = {get: function(){return 42; },"
+                           "            configurable: true };"
+                           "Object.defineProperty(obj, 'x', desc);"
+                           "obj.x"));
+  result = script_define->Run();
+  CHECK_EQ(result, v8_num(42));
+
+
+  result = script_desc->Run();
+  CHECK_EQ(result->BooleanValue(), true);
+
+
+  script_define =
+    Script::Compile(v8_str("var desc = {get: function(){return 43; },"
+                           "            configurable: false };"
+                           "Object.defineProperty(obj, 'x', desc);"
+                           "obj.x"));
+  result = script_define->Run();
+  CHECK_EQ(result, v8_num(43));
+  result = script_desc->Run();
+
+  CHECK_EQ(result->BooleanValue(), false);
+
+  v8::TryCatch try_catch;
+  result = script_define->Run();
+  CHECK(try_catch.HasCaught());
+  String::AsciiValue exception_value(try_catch.Exception());
+  CHECK_EQ(*exception_value,
+           "TypeError: Cannot redefine property: defineProperty");
+}
+
 
 v8::Persistent<Value> xValue;
 
@@ -4097,6 +4194,7 @@
   value = v8_compile("other.accessible_prop = 3")->Run();
   CHECK(value->IsNumber());
   CHECK_EQ(3, value->Int32Value());
+  CHECK_EQ(3, g_echo_value);
 
   value = v8_compile("other.accessible_prop")->Run();
   CHECK(value->IsNumber());
@@ -4980,7 +5078,7 @@
 
 
 static v8::Handle<Value> call_as_function(const v8::Arguments& args) {
-  ApiTestFuzzer::Fuzz();
+  //ApiTestFuzzer::Fuzz();
   if (args.IsConstructCall()) {
     if (args[0]->IsInt32()) {
        return v8_num(-args[0]->Int32Value());
diff --git a/test/cctest/test-assembler-arm.cc b/test/cctest/test-assembler-arm.cc
index 459b862..7f3404c 100644
--- a/test/cctest/test-assembler-arm.cc
+++ b/test/cctest/test-assembler-arm.cc
@@ -47,9 +47,6 @@
 
 // The test framework does not accept flags on the command line, so we set them
 static void InitializeVM() {
-  // disable compilation of natives by specifying an empty natives file
-  FLAG_natives_file = "";
-
   // enable generation of comments
   FLAG_debug_code = true;
 
diff --git a/test/cctest/test-compiler.cc b/test/cctest/test-compiler.cc
index 05c29d7..b5f12b9 100644
--- a/test/cctest/test-compiler.cc
+++ b/test/cctest/test-compiler.cc
@@ -114,8 +114,13 @@
 
 static Handle<JSFunction> Compile(const char* source) {
   Handle<String> source_code(Factory::NewStringFromUtf8(CStrVector(source)));
-  Handle<JSFunction> boilerplate =
-      Compiler::Compile(source_code, Handle<String>(), 0, 0, NULL, NULL);
+  Handle<JSFunction> boilerplate = Compiler::Compile(source_code,
+                                                     Handle<String>(),
+                                                     0,
+                                                     0,
+                                                     NULL,
+                                                     NULL,
+                                                     NOT_NATIVES_CODE);
   return Factory::NewFunctionFromBoilerplate(boilerplate,
                                              Top::global_context());
 }
diff --git a/test/cctest/test-regexp.cc b/test/cctest/test-regexp.cc
index b1ca45a..db312da 100644
--- a/test/cctest/test-regexp.cc
+++ b/test/cctest/test-regexp.cc
@@ -653,6 +653,8 @@
 typedef RegExpMacroAssemblerX64 ArchRegExpMacroAssembler;
 #elif V8_TARGET_ARCH_ARM
 typedef RegExpMacroAssemblerARM ArchRegExpMacroAssembler;
+#elif V8_TARGET_ARCH_MIPS
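+// There is no native regexp macro assembler for MIPS yet.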
+typedef RegExpMacroAssembler ArchRegExpMacroAssembler;
 #endif
 
 class ContextInitializer {
diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc
index c34840a..18f6988 100644
--- a/test/cctest/test-serialize.cc
+++ b/test/cctest/test-serialize.cc
@@ -290,57 +290,68 @@
 
 
 DEPENDENT_TEST(Deserialize, Serialize) {
-  v8::HandleScope scope;
+  // The serialize-deserialize tests only work if the VM is built without
+  // serialization.  That doesn't matter.  We don't need to be able to
+  // serialize a snapshot in a VM that is booted from a snapshot.
+  if (!Snapshot::IsEnabled()) {
+    v8::HandleScope scope;
 
-  Deserialize();
+    Deserialize();
 
-  v8::Persistent<v8::Context> env = v8::Context::New();
-  env->Enter();
+    v8::Persistent<v8::Context> env = v8::Context::New();
+    env->Enter();
 
-  SanityCheck();
+    SanityCheck();
+  }
 }
 
 
 DEPENDENT_TEST(DeserializeFromSecondSerialization, SerializeTwice) {
-  v8::HandleScope scope;
+  if (!Snapshot::IsEnabled()) {
+    v8::HandleScope scope;
 
-  Deserialize();
+    Deserialize();
 
-  v8::Persistent<v8::Context> env = v8::Context::New();
-  env->Enter();
+    v8::Persistent<v8::Context> env = v8::Context::New();
+    env->Enter();
 
-  SanityCheck();
+    SanityCheck();
+  }
 }
 
 
 DEPENDENT_TEST(DeserializeAndRunScript2, Serialize) {
-  v8::HandleScope scope;
+  if (!Snapshot::IsEnabled()) {
+    v8::HandleScope scope;
 
-  Deserialize();
+    Deserialize();
 
-  v8::Persistent<v8::Context> env = v8::Context::New();
-  env->Enter();
+    v8::Persistent<v8::Context> env = v8::Context::New();
+    env->Enter();
 
-  const char* c_source = "\"1234\".length";
-  v8::Local<v8::String> source = v8::String::New(c_source);
-  v8::Local<v8::Script> script = v8::Script::Compile(source);
-  CHECK_EQ(4, script->Run()->Int32Value());
+    const char* c_source = "\"1234\".length";
+    v8::Local<v8::String> source = v8::String::New(c_source);
+    v8::Local<v8::Script> script = v8::Script::Compile(source);
+    CHECK_EQ(4, script->Run()->Int32Value());
+  }
 }
 
 
 DEPENDENT_TEST(DeserializeFromSecondSerializationAndRunScript2,
                SerializeTwice) {
-  v8::HandleScope scope;
+  if (!Snapshot::IsEnabled()) {
+    v8::HandleScope scope;
 
-  Deserialize();
+    Deserialize();
 
-  v8::Persistent<v8::Context> env = v8::Context::New();
-  env->Enter();
+    v8::Persistent<v8::Context> env = v8::Context::New();
+    env->Enter();
 
-  const char* c_source = "\"1234\".length";
-  v8::Local<v8::String> source = v8::String::New(c_source);
-  v8::Local<v8::Script> script = v8::Script::Compile(source);
-  CHECK_EQ(4, script->Run()->Int32Value());
+    const char* c_source = "\"1234\".length";
+    v8::Local<v8::String> source = v8::String::New(c_source);
+    v8::Local<v8::Script> script = v8::Script::Compile(source);
+    CHECK_EQ(4, script->Run()->Int32Value());
+  }
 }
 
 
@@ -393,14 +404,8 @@
 }
 
 
-DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
-  int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
-  Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
-  OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
-
-  CHECK(Snapshot::Initialize(startup_name.start()));
-
-  const char* file_name = FLAG_testing_serialization_file;
+static void ReserveSpaceForPartialSnapshot(const char* file_name) {
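+  // Read the space requirements recorded in the snapshot's ".size" file and
+  // reserve that much space before deserializing the partial snapshot.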
+  int file_name_length = StrLength(file_name) + 10;
   Vector<char> name = Vector<char>::New(file_name_length + 1);
   OS::SNPrintF(name, "%s.size", file_name);
   FILE* fp = OS::FOpen(name.start(), "r");
@@ -429,26 +434,122 @@
                      map_size,
                      cell_size,
                      large_size);
-  int snapshot_size = 0;
-  byte* snapshot = ReadBytes(file_name, &snapshot_size);
+}
 
-  Object* root;
-  {
-    SnapshotByteSource source(snapshot, snapshot_size);
-    Deserializer deserializer(&source);
-    deserializer.DeserializePartial(&root);
-    CHECK(root->IsString());
+
+DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
+  if (!Snapshot::IsEnabled()) {
+    int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
+    Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+    OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+
+    CHECK(Snapshot::Initialize(startup_name.start()));
+
+    const char* file_name = FLAG_testing_serialization_file;
+    ReserveSpaceForPartialSnapshot(file_name);
+
+    int snapshot_size = 0;
+    byte* snapshot = ReadBytes(file_name, &snapshot_size);
+
+    Object* root;
+    {
+      SnapshotByteSource source(snapshot, snapshot_size);
+      Deserializer deserializer(&source);
+      deserializer.DeserializePartial(&root);
+      CHECK(root->IsString());
+    }
+    v8::HandleScope handle_scope;
+    Handle<Object> root_handle(root);
+
+    Object* root2;
+    {
+      SnapshotByteSource source(snapshot, snapshot_size);
+      Deserializer deserializer(&source);
+      deserializer.DeserializePartial(&root2);
+      CHECK(root2->IsString());
+      CHECK(*root_handle == root2);
+    }
   }
-  v8::HandleScope handle_scope;
-  Handle<Object>root_handle(root);
+}
 
-  Object* root2;
-  {
-    SnapshotByteSource source(snapshot, snapshot_size);
-    Deserializer deserializer(&source);
-    deserializer.DeserializePartial(&root2);
-    CHECK(root2->IsString());
-    CHECK(*root_handle == root2);
+
+TEST(ContextSerialization) {
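+// Write a startup snapshot plus a partial snapshot containing a single
+// context, recording the space used by the partial snapshot.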
+  Serializer::Enable();
+  v8::V8::Initialize();
+
+  v8::Persistent<v8::Context> env = v8::Context::New();
+  ASSERT(!env.IsEmpty());
+  env->Enter();
+  // Make sure all builtin scripts are cached.
+  { HandleScope scope;
+    for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
+      Bootstrapper::NativesSourceLookup(i);
+    }
+  }
+  // If we don't do this then we end up with a stray root pointing at the
+  // context even after we have disposed of env.
+  Heap::CollectAllGarbage(true);
+
+  int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
+  Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+  OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+
+  env->Exit();
+
+  Object* raw_context = *(v8::Utils::OpenHandle(*env));
+
+  env.Dispose();
+
+  FileByteSink startup_sink(startup_name.start());
+  StartupSerializer startup_serializer(&startup_sink);
+  startup_serializer.SerializeStrongReferences();
+
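+  // Serialize the context into a separate partial snapshot.  The partial
+  // serializer consults the startup serializer so that objects covered by
+  // the startup snapshot are referenced rather than re-serialized.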
+  FileByteSink partial_sink(FLAG_testing_serialization_file);
+  PartialSerializer p_ser(&startup_serializer, &partial_sink);
+  p_ser.Serialize(&raw_context);
+  startup_serializer.SerializeWeakReferences();
+  partial_sink.WriteSpaceUsed(p_ser.CurrentAllocationAddress(NEW_SPACE),
+                              p_ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
+                              p_ser.CurrentAllocationAddress(OLD_DATA_SPACE),
+                              p_ser.CurrentAllocationAddress(CODE_SPACE),
+                              p_ser.CurrentAllocationAddress(MAP_SPACE),
+                              p_ser.CurrentAllocationAddress(CELL_SPACE),
+                              p_ser.CurrentAllocationAddress(LO_SPACE));
+}
+
+
+DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
+  if (!Snapshot::IsEnabled()) {
+    int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
+    Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+    OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+
+    CHECK(Snapshot::Initialize(startup_name.start()));
+
+    const char* file_name = FLAG_testing_serialization_file;
+    ReserveSpaceForPartialSnapshot(file_name);
+
+    int snapshot_size = 0;
+    byte* snapshot = ReadBytes(file_name, &snapshot_size);
+
+    Object* root;
+    {
+      SnapshotByteSource source(snapshot, snapshot_size);
+      Deserializer deserializer(&source);
+      deserializer.DeserializePartial(&root);
+      CHECK(root->IsContext());
+    }
+    v8::HandleScope handle_scope;
+      Handle<Object> root_handle(root);
+
+    Object* root2;
+    {
+      SnapshotByteSource source(snapshot, snapshot_size);
+      Deserializer deserializer(&source);
+      deserializer.DeserializePartial(&root2);
+      CHECK(root2->IsContext());
+      CHECK(*root_handle != root2);
+    }
   }
 }
 
@@ -456,6 +557,7 @@
 TEST(LinearAllocation) {
   v8::V8::Initialize();
   int new_space_max = 512 * KB;
+
   for (int size = 1000; size < 5 * MB; size += size >> 1) {
     int new_space_size = (size < new_space_max) ? size : new_space_max;
     Heap::ReserveSpace(
diff --git a/test/es5conform/es5conform.status b/test/es5conform/es5conform.status
index a755016..a3f137f 100644
--- a/test/es5conform/es5conform.status
+++ b/test/es5conform/es5conform.status
@@ -39,8 +39,6 @@
 chapter15/15.1: UNIMPLEMENTED
 chapter15/15.2/15.2.3/15.2.3.1: UNIMPLEMENTED
 chapter15/15.2/15.2.3/15.2.3.5: UNIMPLEMENTED
-chapter15/15.2/15.2.3/15.2.3.6: UNIMPLEMENTED
-chapter15/15.2/15.2.3/15.2.3.7: UNIMPLEMENTED
 chapter15/15.2/15.2.3/15.2.3.8: UNIMPLEMENTED
 chapter15/15.2/15.2.3/15.2.3.9: UNIMPLEMENTED
 chapter15/15.2/15.2.3/15.2.3.10: UNIMPLEMENTED
@@ -48,24 +46,6 @@
 chapter15/15.2/15.2.3/15.2.3.12: UNIMPLEMENTED
 chapter15/15.2/15.2.3/15.2.3.13: UNIMPLEMENTED
 
-# Object.getPrototypeOf
-chapter15/15.2/15.2.3/15.2.3.2: PASS
-
-# Object.getOwnPropertyDescriptor
-chapter15/15.2/15.2.3/15.2.3.3: PASS
-
-# NOT IMPLEMENTED: defineProperty
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-3: FAIL_OK
-
-# NOT IMPLEMENTED: getOwnPropertyNames
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-16: FAIL_OK
-
-# NOT IMPLEMENTED: defineProperty
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-18: FAIL_OK
-
-# NOT IMPLEMENTED: defineProperties
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-19: FAIL_OK
-
 # NOT IMPLEMENTED: seal
 chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-20: FAIL_OK
 
@@ -87,37 +67,24 @@
 # NOT IMPLEMENTED: bind
 chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-38: FAIL_OK
 
-# Built-ins have wrong descriptor (should all be false)
+# NaN is writable
 chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-178: FAIL_OK
+# Infinity is writable
 chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-179: FAIL_OK
+# undefined is writable
 chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-180: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-182: FAIL_OK
 
 # Our Function object has a "arguments" property which is used as a non
 # property in in the test
 chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-183: FAIL_OK
 
-
 # Our Function object has a "caller" property which is used as a non
 # property in in the test
 chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-184: FAIL_OK
 
-# Built-ins have wrong descriptor (should all be false)
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-185: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-186: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-187: FAIL_OK
+# Our Function object has a "name" property which is used as a non
+# property in the test
 chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-188: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-189: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-190: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-191: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-192: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-193: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-194: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-195: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-201: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-210: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-211: FAIL_OK
-
 
 # NOT IMPLEMENTED: RegExp.prototype.source
 chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-212: FAIL_OK
@@ -131,18 +98,6 @@
 # NOT IMPLEMENTED: RegExp.prototype.multiline
 chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-215: FAIL_OK
 
-# Errors have wrong descriptor (should all be false)
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-216: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-217: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-218: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-219: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-220: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-221: FAIL_OK
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-222: FAIL_OK
-
-# Object.getOwnPropertyNames
-chapter15/15.2/15.2.3/15.2.3.4: PASS
-
 # All of the tests below marked SUBSETFAIL (in 15.2.3.4) fail because 
 # the tests assumes that objects can not have more properties
 # than those described in the spec - but according to spec they can 
@@ -252,12 +207,9 @@
 
 
 
-# Object.keys
-chapter15/15.2/15.2.3/15.2.3.14: PASS
-
 # We fail this because Object.keys returns numbers for element indices
 # rather than strings.
-chapter15/15.2/15.2.3/15.2.3.14/15.2.3.14-3-3: FAIL_OK
+#chapter15/15.2/15.2.3/15.2.3.14/15.2.3.14-3-3: FAIL_OK
 
 chapter15/15.3: UNIMPLEMENTED
 
@@ -267,9 +219,6 @@
 chapter15/15.4/15.4.4/15.4.4.21: UNIMPLEMENTED
 chapter15/15.4/15.4.4/15.4.4.22: UNIMPLEMENTED
 
-# Array.prototype.every
-chapter15/15.4/15.4.4/15.4.4.16: PASS
-
 # Wrong test - because this is not given as argument to arr.every
 # this._15_4_4_16_5_1 evaluates to undefined
 chapter15/15.4/15.4.4/15.4.4.16/15.4.4.16-5-1: FAIL_OK
@@ -285,10 +234,6 @@
 # if (val>1) in test should be if (val>2)
 chapter15/15.4/15.4.4/15.4.4.16/15.4.4.16-8-10: FAIL_OK
 
-
-# Array.prototype.some
-chapter15/15.4/15.4.4/15.4.4.17: PASS
-
 # Wrong assumption - according to spec some returns a Boolean, not a number
 chapter15/15.4/15.4.4/15.4.4.17/15.4.4.17-4-9: FAIL_OK
 
@@ -304,20 +249,12 @@
 # Same as 15.4.4.16-10-8
 chapter15/15.4/15.4.4/15.4.4.17/15.4.4.17-8-10: FAIL_OK
 
-
-# Array.prototype.forEach
-chapter15/15.4/15.4.4/15.4.4.18: PASS
-
 # Same as 15.4.4.16-5-1
 chapter15/15.4/15.4.4/15.4.4.18/15.4.4.18-5-1: FAIL_OK
 
 # Same as 15.4.4.16-7-7
 chapter15/15.4/15.4.4/15.4.4.18/15.4.4.18-7-6: FAIL_OK
 
-
-# Array.prototype.map
-chapter15/15.4/15.4.4/15.4.4.19: PASS
-
 # Same as 15.4.4.16-5-1
 chapter15/15.4/15.4.4/15.4.4.19/15.4.4.19-5-1: FAIL_OK
 
@@ -334,3 +271,8 @@
 chapter15/15.9: UNIMPLEMENTED
 chapter15/15.10: UNIMPLEMENTED
 chapter15/15.12: UNIMPLEMENTED
+
+[ $arch == mips ]
+
+# Skip all tests on MIPS.
+*: SKIP
diff --git a/test/message/bugs/.svn/all-wcprops b/test/message/bugs/.svn/all-wcprops
deleted file mode 100644
index f83e5fb..0000000
--- a/test/message/bugs/.svn/all-wcprops
+++ /dev/null
@@ -1,5 +0,0 @@
-K 25
-svn:wc:ra_dav:version-url
-V 58
-/svn/!svn/ver/565/branches/bleeding_edge/test/message/bugs
-END
diff --git a/test/message/bugs/.svn/format b/test/message/bugs/.svn/format
deleted file mode 100644
index 45a4fb7..0000000
--- a/test/message/bugs/.svn/format
+++ /dev/null
@@ -1 +0,0 @@
-8
diff --git a/test/message/message.status b/test/message/message.status
index fc2896b..c4a3842 100644
--- a/test/message/message.status
+++ b/test/message/message.status
@@ -29,3 +29,8 @@
 
 # All tests in the bug directory are expected to fail.
 bugs: FAIL
+
+[ $arch == mips ]
+
+# Skip all tests on MIPS.
+*: SKIP
diff --git a/test/mjsunit/debug-script.js b/test/mjsunit/debug-script.js
index effa145..402f90c 100644
--- a/test/mjsunit/debug-script.js
+++ b/test/mjsunit/debug-script.js
@@ -52,7 +52,7 @@
 }
 
 // This has to be updated if the number of native scripts change.
-assertEquals(12, named_native_count);
+assertEquals(13, named_native_count);
 // If no snapshot is used, only the 'gc' extension is loaded.
 // If snapshot is used, all extensions are cached in the snapshot.
 assertTrue(extension_count == 1 || extension_count == 5);
diff --git a/test/mjsunit/json.js b/test/mjsunit/json.js
index 56562e7..85457cd 100644
--- a/test/mjsunit/json.js
+++ b/test/mjsunit/json.js
@@ -200,8 +200,10 @@
 TestInvalid('"Unterminated string\\"');
 TestInvalid('"Unterminated string\\\\\\"');
 
-// Test bad JSON that would be good JavaScript (ES5).
+// JavaScript RegExp literals are not valid in JSON.
+TestInvalid('/true/');
 
+// Test bad JSON that would be good JavaScript (ES5).
 TestInvalid("{true:42}");
 TestInvalid("{false:42}");
 TestInvalid("{null:42}");
@@ -211,7 +213,6 @@
 TestInvalid("{-1:42}");
 
 // Test for trailing garbage detection.
-
 TestInvalid('42 px');
 TestInvalid('42 .2');
 TestInvalid('42 2');
@@ -277,8 +278,35 @@
              JSON.stringify({a:"b",c:"d"}, null, 1));
 assertEquals('{"y":6,"x":5}', JSON.stringify({x:5,y:6}, ['y', 'x']));
 
+// The gap is capped at ten characters if specified as a string.
+assertEquals('{\n          "a": "b",\n          "c": "d"\n}',
+              JSON.stringify({a:"b",c:"d"}, null,
+                             "          /*characters after 10th*/"));
+
+// The gap is capped at ten characters if specified as a number.
+assertEquals('{\n          "a": "b",\n          "c": "d"\n}',
+              JSON.stringify({a:"b",c:"d"}, null, 15));
+
+// Wrapper objects returned by the replacer are unwrapped to primitives.
+function newx(k, v) { return (k == "x") ? new v(42) : v; }
+assertEquals('{"x":"42"}', JSON.stringify({x: String}, newx));
+assertEquals('{"x":42}', JSON.stringify({x: Number}, newx));
+assertEquals('{"x":true}', JSON.stringify({x: Boolean}, newx));
+
 assertEquals(undefined, JSON.stringify(undefined));
 assertEquals(undefined, JSON.stringify(function () { }));
+// Arrays with missing, undefined, or function elements have those elements
+// replaced by null.
+assertEquals("[null,null,null]",
+             JSON.stringify([undefined,,function(){}]));
+
+// Objects with undefined or function properties (including properties the
+// replacer maps to undefined or a function) have those properties ignored.
+assertEquals('{}',
+             JSON.stringify({a: undefined, b: function(){}, c: 42, d: 42},
+                            function(k, v) { if (k == "c") return undefined;
+                                             if (k == "d") return function(){};
+                                             return v; }));
 
 TestInvalid('1); throw "foo"; (1');
 
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index f1752b9..7cb2416 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -64,3 +64,7 @@
 # Skip long running test in debug mode on ARM.
 string-indexof-2: PASS, SKIP if $mode == debug
 
+[ $arch == mips ]
+
+# Skip all tests on MIPS.
+*: SKIP
diff --git a/test/mjsunit/substr.js b/test/mjsunit/substr.js
index 8c276f9..f69a9c0 100644
--- a/test/mjsunit/substr.js
+++ b/test/mjsunit/substr.js
@@ -44,9 +44,6 @@
 assertEquals(s1, s.substr({ valueOf: function() { return 1; } }));
 assertEquals(s1, s.substr({ toString: function() { return '1'; } }));
 
-for (var i = 0; i < s.length; i++)
-  for (var j = i; j < s.length + 5; j++)
-    assertEquals(s.substring(i, j), s.substr(i, j - i));
 
 assertEquals(s.substring(s.length - 1), s.substr(-1));
 assertEquals(s.substring(s.length - 1), s.substr(-1.2));
@@ -63,3 +60,78 @@
 assertEquals('', s.substr(0, null));
 assertEquals(s, s.substr(0, String(s.length)));
 assertEquals('a', s.substr(0, true));
+
+
+// Test substrings of different lengths and alignments.
+// First ASCII.
+var x = "ASCII";
+for (var i = 0; i < 25; i++) {
+  x += (i >> 4).toString(16) + (i & 0x0f).toString(16);
+}
+/x/.exec(x);  // Try to force a flatten.
+for (var i = 5; i < 25; i++) {
+  for (var j = 0; j < 25; j++) {
+    var z = x.substring(i, i+j);
+    var w = Math.random() * 42;  // Allocate something new in new-space.
+    assertEquals(j, z.length);
+    for (var k = 0; k < j; k++) {
+      assertEquals(x.charAt(i+k), z.charAt(k));
+    }
+  }
+}
+
+
+// Then two-byte strings.
+x = "UC16\u2028";  // Non-ascii char forces two-byte string.
+for (var i = 0; i < 25; i++) {
+  x += (i >> 4).toString(16) + (i & 0x0f).toString(16);
+}
+/x/.exec(x);  // Try to force a flatten.
+for (var i = 5; i < 25; i++) {
+  for (var j = 0; j < 25; j++) {
+    var z = x.substring(i, i + j);
+    var w = Math.random() * 42;  // Allocate something new in new-space.
+    assertEquals(j, z.length);
+    for (var k = 0; k < j; k++) {
+      assertEquals(x.charAt(i+k), z.charAt(k));
+    }
+  }
+}
+
+
+// Keep creating strings to force an allocation failure on substring creation.
+var x = "0123456789ABCDEF";
+x += x;  // 2^5
+x += x;
+x += x;
+x += x;
+x += x;
+x += x;  // 2^10
+x += x;
+x += x;
+var xl = x.length;
+var cache = [];
+for (var i = 0; i < 10000; i++) {
+  var z = x.substring(i % xl);
+  assertEquals(xl - (i % xl), z.length);
+  cache.push(z);
+}
+
+
+// Same with two-byte strings.
+var x = "\u2028123456789ABCDEF";
+x += x;  // 2^5
+x += x;
+x += x;
+x += x;
+x += x;
+x += x;  // 2^10
+x += x;
+x += x;
+var xl = x.length;
+var cache = [];
+for (var i = 0; i < 10000; i++) {
+  var z = x.substring(i % xl);
+  assertEquals(xl - (i % xl), z.length);
+  cache.push(z);
+}
diff --git a/test/mjsunit/tools/logreader.js b/test/mjsunit/tools/logreader.js
index 8b74789..485990e 100644
--- a/test/mjsunit/tools/logreader.js
+++ b/test/mjsunit/tools/logreader.js
@@ -80,19 +80,3 @@
   assertEquals('bbbbaaaa', reader.expandBackRef_('bbbb#2:4'));
   assertEquals('"#1:1"', reader.expandBackRef_('"#1:1"'));
 })();
-
-
-// See http://code.google.com/p/v8/issues/detail?id=420
-(function testReadingTruncatedLog() {
-  // Having an incorrect event in the middle of a log should throw an exception.
-  var reader1 = new devtools.profiler.LogReader({});
-  assertThrows(function() {
-    reader1.processLogChunk('alias,a,b\nxxxx\nalias,c,d\n');
-  });
-
-  // But having it as the last record should not.
-  var reader2 = new devtools.profiler.LogReader({});
-  assertDoesNotThrow(function() {
-    reader2.processLogChunk('alias,a,b\nalias,c,d\nxxxx');
-  });
-})();
diff --git a/test/mjsunit/tools/tickprocessor.js b/test/mjsunit/tools/tickprocessor.js
index abcde89..30b0ec2 100644
--- a/test/mjsunit/tools/tickprocessor.js
+++ b/test/mjsunit/tools/tickprocessor.js
@@ -379,9 +379,7 @@
   var tp = new TickProcessor(
       new CppEntriesProviderMock(), separateIc, ignoreUnknown, stateFilter);
   var pm = new PrintMonitor(testsPath + refOutput);
-  tp.processLogFile(testsPath + logInput);
-  // Hack file name to avoid dealing with platform specifics.
-  tp.lastLogFileName_ = 'v8.log';
+  tp.processLogFileInTest(testsPath + logInput);
   tp.printStatistics();
   pm.finish();
 };
diff --git a/test/mjsunit/typeof.js b/test/mjsunit/typeof.js
index b460fbb..15ab7bf 100644
--- a/test/mjsunit/typeof.js
+++ b/test/mjsunit/typeof.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --nofast-compiler
+// Flags: --nofull-compiler
 
 // The type of a regular expression should be 'function', including in
 // the context of string equality comparisons.
diff --git a/test/sputnik/sputnik.status b/test/sputnik/sputnik.status
index 16a44c5..e5b9e20 100644
--- a/test/sputnik/sputnik.status
+++ b/test/sputnik/sputnik.status
@@ -316,3 +316,8 @@
 S11.4.3_A3.6: FAIL_OK
 S15.10.7_A3_T2: FAIL_OK
 S15.10.7_A3_T1: FAIL_OK
+
+[ $arch == mips ]
+
+# Skip all tests on MIPS.
+*: SKIP
diff --git a/tools/js2c.py b/tools/js2c.py
index b889530..64de7d3 100755
--- a/tools/js2c.py
+++ b/tools/js2c.py
@@ -220,8 +220,8 @@
   }
 
   template <>
-  int NativesCollection<%(type)s>::GetDelayCount() {
-    return %(delay_count)i;
+  int NativesCollection<%(type)s>::GetDebuggerCount() {
+    return %(debugger_count)i;
   }
 
   template <>
@@ -252,23 +252,23 @@
 """
 
 
-GET_DELAY_INDEX_CASE = """\
+GET_DEBUGGER_INDEX_CASE = """\
     if (strcmp(name, "%(id)s") == 0) return %(i)i;
 """
 
 
-GET_DELAY_SCRIPT_SOURCE_CASE = """\
+GET_DEBUGGER_SCRIPT_SOURCE_CASE = """\
     if (index == %(i)i) return Vector<const char>(%(id)s, %(length)i);
 """
 
 
-GET_DELAY_SCRIPT_NAME_CASE = """\
+GET_DEBUGGER_SCRIPT_NAME_CASE = """\
     if (index == %(i)i) return Vector<const char>("%(name)s", %(length)i);
 """
 
 def JS2C(source, target, env):
   ids = []
-  delay_ids = []
+  debugger_ids = []
   modules = []
   # Locate the macros file name.
   consts = {}
@@ -287,7 +287,7 @@
   source_lines_empty = []
   for module in modules:
     filename = str(module)
-    delay = filename.endswith('-delay.js')
+    debugger = filename.endswith('-debugger.js')
     lines = ReadFile(filename)
     lines = ExpandConstants(lines, consts)
     lines = ExpandMacros(lines, macros)
@@ -295,29 +295,29 @@
     lines = minifier.JSMinify(lines)
     data = ToCArray(lines)
     id = (os.path.split(filename)[1])[:-3]
-    if delay: id = id[:-6]
-    if delay:
-      delay_ids.append((id, len(lines)))
+    if debugger: id = id[:-9]
+    if debugger:
+      debugger_ids.append((id, len(lines)))
     else:
       ids.append((id, len(lines)))
     source_lines.append(SOURCE_DECLARATION % { 'id': id, 'data': data })
     source_lines_empty.append(SOURCE_DECLARATION % { 'id': id, 'data': data })
 
-  # Build delay support functions
+  # Build debugger support functions
   get_index_cases = [ ]
   get_script_source_cases = [ ]
   get_script_name_cases = [ ]
 
   i = 0
-  for (id, length) in delay_ids:
+  for (id, length) in debugger_ids:
     native_name = "native %s.js" % id
-    get_index_cases.append(GET_DELAY_INDEX_CASE % { 'id': id, 'i': i })
-    get_script_source_cases.append(GET_DELAY_SCRIPT_SOURCE_CASE % {
+    get_index_cases.append(GET_DEBUGGER_INDEX_CASE % { 'id': id, 'i': i })
+    get_script_source_cases.append(GET_DEBUGGER_SCRIPT_SOURCE_CASE % {
       'id': id,
       'length': length,
       'i': i
     })
-    get_script_name_cases.append(GET_DELAY_SCRIPT_NAME_CASE % {
+    get_script_name_cases.append(GET_DEBUGGER_SCRIPT_NAME_CASE % {
       'name': native_name,
       'length': len(native_name),
       'i': i
@@ -326,13 +326,13 @@
 
   for (id, length) in ids:
     native_name = "native %s.js" % id
-    get_index_cases.append(GET_DELAY_INDEX_CASE % { 'id': id, 'i': i })
-    get_script_source_cases.append(GET_DELAY_SCRIPT_SOURCE_CASE % {
+    get_index_cases.append(GET_DEBUGGER_INDEX_CASE % { 'id': id, 'i': i })
+    get_script_source_cases.append(GET_DEBUGGER_SCRIPT_SOURCE_CASE % {
       'id': id,
       'length': length,
       'i': i
     })
-    get_script_name_cases.append(GET_DELAY_SCRIPT_NAME_CASE % {
+    get_script_name_cases.append(GET_DEBUGGER_SCRIPT_NAME_CASE % {
       'name': native_name,
       'length': len(native_name),
       'i': i
@@ -342,8 +342,8 @@
   # Emit result
   output = open(str(target[0]), "w")
   output.write(HEADER_TEMPLATE % {
-    'builtin_count': len(ids) + len(delay_ids),
-    'delay_count': len(delay_ids),
+    'builtin_count': len(ids) + len(debugger_ids),
+    'debugger_count': len(debugger_ids),
     'source_lines': "\n".join(source_lines),
     'get_index_cases': "".join(get_index_cases),
     'get_script_source_cases': "".join(get_script_source_cases),
@@ -355,8 +355,8 @@
   if len(target) > 1:
     output = open(str(target[1]), "w")
     output.write(HEADER_TEMPLATE % {
-      'builtin_count': len(ids) + len(delay_ids),
-      'delay_count': len(delay_ids),
+      'builtin_count': len(ids) + len(debugger_ids),
+      'debugger_count': len(debugger_ids),
       'source_lines': "\n".join(source_lines_empty),
       'get_index_cases': "".join(get_index_cases),
       'get_script_source_cases': "".join(get_script_source_cases),
diff --git a/tools/linux-tick-processor b/tools/linux-tick-processor
index ca1c721..1715705 100755
--- a/tools/linux-tick-processor
+++ b/tools/linux-tick-processor
@@ -16,8 +16,17 @@
   [ -x $d8_exec ] || scons -j4 -C $D8_PATH -Y $tools_path/.. d8
 fi
 
+# Find the name of the log file to process; it must not start with a dash.
+log_file="v8.log"
+for arg in "$@"
+do
+  if [[ "${arg}" != -* ]]; then
+    log_file=${arg}
+  fi
+done
+
 # nm spits out 'no symbols found' messages to stderr.
-$d8_exec $tools_path/splaytree.js $tools_path/codemap.js \
+cat $log_file | $d8_exec $tools_path/splaytree.js $tools_path/codemap.js \
   $tools_path/csvparser.js $tools_path/consarray.js \
   $tools_path/profile.js $tools_path/profile_view.js \
   $tools_path/logreader.js $tools_path/tickprocessor.js \
diff --git a/tools/logreader.js b/tools/logreader.js
index 20a1f54..b2aca73 100644
--- a/tools/logreader.js
+++ b/tools/logreader.js
@@ -76,6 +76,18 @@
    * @type {Array.<string>}
    */
   this.backRefs_ = [];
+
+  /**
+   * Current line.
+   * @type {number}
+   */
+  this.lineNum_ = 0;
+
+  /**
+   * CSV lines parser.
+   * @type {devtools.profiler.CsvParser}
+   */
+  this.csvParser_ = new devtools.profiler.CsvParser();
 };
 
 
@@ -136,6 +148,16 @@
 
 
 /**
+ * Processes a line of V8 profiler event log.
+ *
+ * @param {string} line A line of log.
+ */
+devtools.profiler.LogReader.prototype.processLogLine = function(line) {
+  this.processLog_([line]);
+};
+
+
+/**
  * Processes stack record.
  *
  * @param {number} pc Program counter.
@@ -280,25 +302,20 @@
  * @private
  */
 devtools.profiler.LogReader.prototype.processLog_ = function(lines) {
-  var csvParser = new devtools.profiler.CsvParser();
-  try {
-    for (var i = 0, n = lines.length; i < n; ++i) {
-      var line = lines[i];
-      if (!line) {
-        continue;
-      }
+  for (var i = 0, n = lines.length; i < n; ++i, ++this.lineNum_) {
+    var line = lines[i];
+    if (!line) {
+      continue;
+    }
+    try {
       if (line.charAt(0) == '#' ||
           line.substr(0, line.indexOf(',')) in this.backRefsCommands_) {
         line = this.expandBackRef_(line);
       }
-      var fields = csvParser.parseLine(line);
+      var fields = this.csvParser_.parseLine(line);
       this.dispatchLogRow_(fields);
-    }
-  } catch (e) {
-    // An error on the last line is acceptable since log file can be truncated.
-    if (i < n - 1) {
-      this.printError('line ' + (i + 1) + ': ' + (e.message || e));
-      throw e;
+    } catch (e) {
+      this.printError('line ' + (this.lineNum_ + 1) + ': ' + (e.message || e));
     }
   }
 };
diff --git a/tools/tickprocessor.js b/tools/tickprocessor.js
index 35422e2..40cee8a 100644
--- a/tools/tickprocessor.js
+++ b/tools/tickprocessor.js
@@ -67,6 +67,9 @@
           processor: this.processCodeMove, backrefs: true },
       'code-delete': { parsers: [this.createAddressParser('code')],
           processor: this.processCodeDelete, backrefs: true },
+      'function-creation': null,
+      'function-move': null,
+      'function-delete': null,
       'snapshot-pos': { parsers: [this.createAddressParser('code'), parseInt],
           processor: this.processSnapshotPosition, backrefs: true }});
 
@@ -157,6 +160,10 @@
           processor: this.processHeapSampleEnd },
       'heap-js-prod-item': { parsers: [null, 'var-args'],
           processor: this.processJSProducer, backrefs: true },
+      'PAGE-LOAD-START': { parsers: [null, null],
+          processor: this.processPageLoadStart },
+      'PAGE-LOAD-END': { parsers: [null, null],
+          processor: this.processPageLoadEnd },
       // Ignored events.
       'profiler': null,
       'heap-sample-stats': null,
@@ -173,6 +180,7 @@
   this.stateFilter_ = stateFilter;
   this.snapshotLogProcessor_ = snapshotLogProcessor;
   this.deserializedEntriesNames_ = [];
+  this.handle_ticks_ = false;
   var ticks = this.ticks_ =
     { total: 0, unaccounted: 0, excluded: 0, gc: 0 };
 
@@ -259,6 +267,16 @@
 
 TickProcessor.prototype.processLogFile = function(fileName) {
   this.lastLogFileName_ = fileName;
+  var line;
+  while (line = readline()) {
+    this.processLogLine(line);
+  }
+};
+
+
+TickProcessor.prototype.processLogFileInTest = function(fileName) {
+  // Hack file name to avoid dealing with platform specifics.
+  this.lastLogFileName_ = 'v8.log';
   var contents = readFile(fileName);
   this.processLogChunk(contents);
 };
@@ -326,6 +344,7 @@
 
 
 TickProcessor.prototype.processTick = function(pc, sp, func, vmState, stack) {
+  if (!this.handle_ticks_) return;
   this.ticks_.total++;
   if (vmState == TickProcessor.VmStates.GC) this.ticks_.gc++;
   if (!this.includeTick(vmState)) {
@@ -373,6 +392,16 @@
 };
 
 
+TickProcessor.prototype.processPageLoadStart = function() {
+  this.handle_ticks_ = true;
+};
+
+
+TickProcessor.prototype.processPageLoadEnd = function() {
+  this.handle_ticks_ = false;
+};
+
+
 TickProcessor.prototype.processJSProducer = function(constructor, stack) {
   if (!this.currentProducerProfile_) return;
   if (stack.length == 0) return;
diff --git a/tools/windows-tick-processor.bat b/tools/windows-tick-processor.bat
index 6743f68..33b1f77 100755
--- a/tools/windows-tick-processor.bat
+++ b/tools/windows-tick-processor.bat
@@ -2,4 +2,28 @@
 
 SET tools_dir=%~dp0
 
-%tools_dir%..\d8 %tools_dir%splaytree.js %tools_dir%codemap.js %tools_dir%csvparser.js %tools_dir%consarray.js %tools_dir%profile.js %tools_dir%profile_view.js %tools_dir%logreader.js %tools_dir%tickprocessor.js %tools_dir%tickprocessor-driver.js -- --windows %*
+SET log_file=v8.log
+
+rem Find the name of the log file to process; it must not start with a dash.
+rem We prepend cmdline args with a digit (in fact, any letter or number would
+rem do) to cope with empty arguments.
+SET arg1=1%1
+IF NOT %arg1:~0,2% == 1 (IF NOT %arg1:~0,2% == 1- SET log_file=%1)
+SET arg2=2%2
+IF NOT %arg2:~0,2% == 2 (IF NOT %arg2:~0,2% == 2- SET log_file=%2)
+SET arg3=3%3
+IF NOT %arg3:~0,2% == 3 (IF NOT %arg3:~0,2% == 3- SET log_file=%3)
+SET arg4=4%4
+IF NOT %arg4:~0,2% == 4 (IF NOT %arg4:~0,2% == 4- SET log_file=%4)
+SET arg5=5%5
+IF NOT %arg5:~0,2% == 5 (IF NOT %arg5:~0,2% == 5- SET log_file=%5)
+SET arg6=6%6
+IF NOT %arg6:~0,2% == 6 (IF NOT %arg6:~0,2% == 6- SET log_file=%6)
+SET arg7=7%7
+IF NOT %arg7:~0,2% == 7 (IF NOT %arg7:~0,2% == 7- SET log_file=%7)
+SET arg8=8%8
+IF NOT %arg8:~0,2% == 8 (IF NOT %arg8:~0,2% == 8- SET log_file=%8)
+SET arg9=9%9
+IF NOT %arg9:~0,2% == 9 (IF NOT %arg9:~0,2% == 9- SET log_file=%9)
+
+type %log_file% | %tools_dir%..\d8 %tools_dir%splaytree.js %tools_dir%codemap.js %tools_dir%csvparser.js %tools_dir%consarray.js %tools_dir%profile.js %tools_dir%profile_view.js %tools_dir%logreader.js %tools_dir%tickprocessor.js %tools_dir%tickprocessor-driver.js -- --windows %*