Merge "Fix lazy-instance template to preserve object alignment on MIPS."
diff --git a/base/atomicops.h b/base/atomicops.h
index 5b2b9dd..ef7fc02 100644
--- a/base/atomicops.h
+++ b/base/atomicops.h
@@ -141,6 +141,8 @@
 #include "base/atomicops_internals_x86_gcc.h"
 #elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARM_FAMILY)
 #include "base/atomicops_internals_arm_gcc.h"
+#elif defined(COMPILER_GCC) && defined(ARCH_CPU_MIPS_FAMILY)
+#include "base/atomicops_internals_mips_gcc.h"
 #else
 #error "Atomic operations are not supported on your platform"
 #endif
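
base/atomicops.h selects exactly one of these internal headers at compile
time, so callers include only the public header and get the implementation
matching their target. A minimal usage sketch, assuming the base::subtle API
this header exposes; Release() below is a hypothetical helper, not an
existing base/ function:

    #include "base/atomicops.h"

    // Drops one reference and returns true when the count reaches zero.
    // The Barrier_ variant orders the decrement against the object teardown
    // that typically follows a true return.
    bool Release(volatile base::subtle::Atomic32* refcount) {
      return base::subtle::Barrier_AtomicIncrement(refcount, -1) == 0;
    }
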
diff --git a/base/atomicops_internals_mips_gcc.h b/base/atomicops_internals_mips_gcc.h
new file mode 100644
index 0000000..2bcddf7
--- /dev/null
+++ b/base/atomicops_internals_mips_gcc.h
@@ -0,0 +1,160 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+//
+// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
+#define BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
+#pragma once
+
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
+
+namespace base {
+namespace subtle {
+
+// Atomically execute:
+//      result = *ptr;
+//      if (*ptr == old_value)
+//        *ptr = new_value;
+//      return result;
+//
+// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
+// Always return the old value of "*ptr".
+//
+// This routine implies no memory barriers.
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev, tmp;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "ll %0, %5\n"  // prev = *ptr
+                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
+                       "move %2, %4\n"  // tmp = new_value
+                       "sc %2, %1\n"  // *ptr = tmp (with atomic check)
+                       "beqz %2, 1b\n"  // start again on atomic error
+                       "nop\n"  // delay slot nop
+                       "2:\n"
+                       ".set pop\n"
+                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
+                       : "Ir" (old_value), "r" (new_value), "m" (*ptr)
+                       : "memory");
+  return prev;
+}
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr.  This routine implies no memory barriers.
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 temp, old;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "ll %1, %2\n"  // old = *ptr
+                       "move %0, %3\n"  // temp = new_value
+                       "sc %0, %2\n"  // *ptr = temp (with atomic check)
+                       "beqz %0, 1b\n"  // start again on atomic error
+                       "nop\n"  // delay slot nop
+                       ".set pop\n"
+                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
+                       : "r" (new_value), "m" (*ptr)
+                       : "memory");
+
+  return old;
+}
+
+// Atomically increment *ptr by "increment".  Returns the new value of
+// *ptr with the increment applied.  This routine implies no memory barriers.
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  Atomic32 temp, temp2;
+
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "ll %0, %2\n"  // temp = *ptr
+                       "addu %1, %0, %3\n"  // temp2 = temp + increment
+                       "sc %1, %2\n"  // *ptr = temp2 (with atomic check)
+                       "beqz %1, 1b\n"  // start again on atomic error
+                       "addu %1, %0, %3\n"  // temp2 = temp + increment
+                       ".set pop\n"
+                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
+                       : "Ir" (increment), "m" (*ptr)
+                       : "memory");
+  // temp2 now holds the final value.
+  return temp2;
+}
+
+// MemoryBarrier() emits a full hardware barrier: "sync" keeps all earlier
+// loads and stores ahead of all later ones, as observed by other CPUs.
+inline void MemoryBarrier() {
+  __asm__ __volatile__("sync" : : : "memory");
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  MemoryBarrier();
+  Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+  return res;
+}
+
+// "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation.  "Barrier" operations have both "Acquire" and "Release"
+// semantics.   A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();  // Acquire: no later memory access may move before the swap.
+  return res;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  MemoryBarrier();  // Release: no earlier memory access may move after the swap.
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+} // namespace base::subtle
+} // namespace base
+
+#undef ATOMICOPS_COMPILER_BARRIER
+
+#endif  // BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
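
The ll/sc (load-linked/store-conditional) loops above are how MIPS expresses
compare-and-swap: sc writes 1 to its register on success and 0 if the
reservation was lost to another CPU, in which case the loop retries. A
minimal sketch of how the resulting primitives compose, assuming the header
above is wired in via base/atomicops.h; SpinLock is a hypothetical
illustration, not an existing base/ class:

    #include "base/atomicops.h"

    class SpinLock {
     public:
      SpinLock() : state_(0) {}

      void Lock() {
        // Spin until the CAS observes 0 (unlocked) and installs 1 (locked).
        // Acquire semantics keep the critical section from moving above us.
        while (base::subtle::Acquire_CompareAndSwap(&state_, 0, 1) != 0) {}
      }

      void Unlock() {
        // Release semantics keep the critical section from moving below us.
        base::subtle::Release_Store(&state_, 0);
      }

     private:
      volatile base::subtle::Atomic32 state_;
    };

On GCC targets that support them, the __sync builtins (for example
__sync_val_compare_and_swap) emit equivalent ll/sc sequences and could back a
shorter implementation.
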
diff --git a/base/debug/debugger_posix.cc b/base/debug/debugger_posix.cc
index bf90a0f..e7560a5 100644
--- a/base/debug/debugger_posix.cc
+++ b/base/debug/debugger_posix.cc
@@ -169,6 +169,8 @@
 #define DEBUG_BREAK() abort()
 #elif defined(ARCH_CPU_ARM_FAMILY)
 #define DEBUG_BREAK() asm("bkpt 0")
+#elif defined(ARCH_CPU_MIPS_FAMILY)
+#define DEBUG_BREAK() asm("break 2")
 #else
 #define DEBUG_BREAK() asm("int3")
 #endif
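
On MIPS, "break 2" raises a breakpoint exception that Linux delivers as
SIGTRAP, so an attached debugger stops exactly at the macro, mirroring ARM's
"bkpt 0" and x86's "int3". A sketch of typical consumption (the helper name
is hypothetical):

    // Trap into the debugger on a failed precondition instead of aborting,
    // so the faulting state can be inspected in place.
    #define BREAK_IF_FALSE(condition) \
      do {                            \
        if (!(condition))             \
          DEBUG_BREAK();              \
      } while (0)
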
diff --git a/build/build_config.h b/build/build_config.h
index cedac01..b17531d 100644
--- a/build/build_config.h
+++ b/build/build_config.h
@@ -117,6 +117,11 @@
 #define ARCH_CPU_ARMEL 1
 #define ARCH_CPU_32_BITS 1
 #define WCHAR_T_IS_UNSIGNED 1
+#elif defined(__MIPSEL__)
+#define ARCH_CPU_MIPS_FAMILY 1
+#define ARCH_CPU_MIPSEL 1
+#define ARCH_CPU_32_BITS 1
+#define WCHAR_T_IS_UNSIGNED 0
 #else
 #error Please add support for your architecture in build/build_config.h
 #endif
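
__MIPSEL__ is the compiler predefine for little-endian MIPS targets;
big-endian builds predefine __MIPSEB__ instead and would still hit the
#error until a corresponding block is added. A sketch of how downstream code
can key off the new macros, assuming base/basictypes.h's COMPILE_ASSERT; the
assertion message is illustrative:

    #include "base/basictypes.h"

    #if defined(ARCH_CPU_MIPS_FAMILY)
    // The block above only claims 32-bit support.
    COMPILE_ASSERT(sizeof(void*) == 4, mips_port_assumes_32_bit_pointers);
    #endif
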
diff --git a/net/http/http_auth_handler_ntlm_portable.cc b/net/http/http_auth_handler_ntlm_portable.cc
index fac37c8..5fa078d 100644
--- a/net/http/http_auth_handler_ntlm_portable.cc
+++ b/net/http/http_auth_handler_ntlm_portable.cc
@@ -70,9 +70,13 @@
  * ***** END LICENSE BLOCK ***** */
 
 // Discover the endianness by testing processor architecture.
-#if defined(ARCH_CPU_X86) || defined(ARCH_CPU_X86_64) || defined(ARCH_CPU_ARMEL)
+#if defined(ARCH_CPU_X86) || defined(ARCH_CPU_X86_64) || \
+    defined(ARCH_CPU_ARMEL) || defined(ARCH_CPU_MIPSEL)
 #define IS_LITTLE_ENDIAN 1
 #undef  IS_BIG_ENDIAN
+#elif defined(ARCH_CPU_MIPSEB)
+#undef  IS_LITTLE_ENDIAN
+#define IS_BIG_ENDIAN 1
 #else
 #error "Unknown endianness"
 #endif
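
Note that ARCH_CPU_MIPSEB is not defined by the build_config.h hunk above, so
the big-endian branch is forward-looking. NTLM serializes its messages
little-endian on the wire, which is what this switch ultimately feeds; a
sketch of the kind of helper involved (WriteUint32LE is hypothetical, and the
little-endian path assumes a plain memcpy suffices when host and wire order
match):

    #include <string.h>

    static void WriteUint32LE(unsigned char* buf, unsigned int v) {
    #ifdef IS_LITTLE_ENDIAN
      memcpy(buf, &v, sizeof(v));  // host order already matches wire order
    #else
      buf[0] = static_cast<unsigned char>(v);        // least significant first
      buf[1] = static_cast<unsigned char>(v >> 8);
      buf[2] = static_cast<unsigned char>(v >> 16);
      buf[3] = static_cast<unsigned char>(v >> 24);
    #endif
    }
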