Update renderscript to build AAQ07 (#553727) in master-release.

Change-Id: Ida2f6cd58b8632297372665899ec8f24d0e04e9e
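
For context, a minimal sketch (not part of the patch; the file and function names are hypothetical) of how the newly added headers are consumed: the ISA-specific headers below are pulled in through the immintrin.h umbrella and gated on target feature macros, so user code only needs the umbrella include plus the matching -m flags (e.g. clang -mavx2):

  /* sketch.c -- hypothetical consumer of the updated clang-include headers */
  #include <immintrin.h>  /* pulls in avx2intrin.h when __AVX2__ is defined */

  #if defined(__AVX2__)
  __m256i add_abs(__m256i a, __m256i b)
  {
    /* AVX2 lane-wise 32-bit add, then absolute value; both intrinsics
       come from the avx2intrin.h added by this change. */
    return _mm256_abs_epi32(_mm256_add_epi32(a, b));
  }
  #endif
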
diff --git a/renderscript/clang-include/CMakeLists.txt b/renderscript/clang-include/CMakeLists.txt
index 1faf92f..6e9cc68 100644
--- a/renderscript/clang-include/CMakeLists.txt
+++ b/renderscript/clang-include/CMakeLists.txt
@@ -1,16 +1,24 @@
 set(files
   altivec.h
+  ammintrin.h
   avxintrin.h
+  avx2intrin.h
+  bmiintrin.h
+  bmi2intrin.h
   emmintrin.h
   float.h
+  fma4intrin.h
+  fmaintrin.h
   immintrin.h
   iso646.h
   limits.h
+  lzcntintrin.h
   mm3dnow.h
   mmintrin.h
   mm_malloc.h
   nmmintrin.h
   pmmintrin.h
+  popcntintrin.h
   smmintrin.h
   stdalign.h
   stdarg.h
@@ -23,6 +31,10 @@
   wmmintrin.h
   x86intrin.h
   xmmintrin.h
+  xopintrin.h
+  cpuid.h
+  unwind.h
+  module.map
   )
 
 set(output_dir ${LLVM_BINARY_DIR}/lib/clang/${CLANG_VERSION}/include)
diff --git a/renderscript/clang-include/LICENSE.TXT b/renderscript/clang-include/LICENSE.TXT
index 91895eb..6c224f8 100644
--- a/renderscript/clang-include/LICENSE.TXT
+++ b/renderscript/clang-include/LICENSE.TXT
@@ -4,7 +4,7 @@
 University of Illinois/NCSA
 Open Source License
 
-Copyright (c) 2007-2011 University of Illinois at Urbana-Champaign.
+Copyright (c) 2007-2012 University of Illinois at Urbana-Champaign.
 All rights reserved.
 
 Developed by:
diff --git a/renderscript/clang-include/ammintrin.h b/renderscript/clang-include/ammintrin.h
new file mode 100644
index 0000000..d87b9cd
--- /dev/null
+++ b/renderscript/clang-include/ammintrin.h
@@ -0,0 +1,68 @@
+/*===---- ammintrin.h - SSE4a intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __AMMINTRIN_H
+#define __AMMINTRIN_H
+
+#ifndef __SSE4A__
+#error "SSE4A instruction set not enabled"
+#else
+
+#include <pmmintrin.h>
+
+#define _mm_extracti_si64(x, len, idx) \
+  ((__m128i)__builtin_ia32_extrqi((__v2di)(__m128i)(x), \
+                                  (char)(len), (char)(idx)))
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_extract_si64(__m128i __x, __m128i __y)
+{
+  return (__m128i)__builtin_ia32_extrq((__v2di)__x, (__v16qi)__y);
+}
+
+#define _mm_inserti_si64(x, y, len, idx) \
+  ((__m128i)__builtin_ia32_insertqi((__v2di)(__m128i)(x), \
+                                    (__v2di)(__m128i)(y), \
+                                    (char)(len), (char)(idx)))
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_insert_si64(__m128i __x, __m128i __y)
+{
+  return (__m128i)__builtin_ia32_insertq((__v2di)__x, (__v2di)__y);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_stream_sd(double *__p, __m128d __a)
+{
+  __builtin_ia32_movntsd(__p, (__v2df)__a);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_stream_ss(float *__p, __m128 __a)
+{
+  __builtin_ia32_movntss(__p, (__v4sf)__a);
+}
+
+#endif /* __SSE4A__ */
+
+#endif /* __AMMINTRIN_H */
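
A hedged usage sketch for the SSE4a header above (illustrative only, not part of the patch; the function name is made up): _mm_extracti_si64 extracts a bit field from the low 64 bits of its operand, zero-extended into the low bits of the result. Build with clang -msse4a on an SSE4a-capable target:

  #include <ammintrin.h>

  /* Return the 8-bit field at bit offset 8 of the low quadword of x.
     The length and offset arguments must be compile-time constants. */
  long long byte1_of_low_qword(__m128i x)
  {
    __m128i r = _mm_extracti_si64(x, 8 /* length in bits */, 8 /* bit offset */);
    return _mm_cvtsi128_si64(r);  /* field arrives zero-extended */
  }
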
diff --git a/renderscript/clang-include/avx2intrin.h b/renderscript/clang-include/avx2intrin.h
new file mode 100644
index 0000000..2c53aed
--- /dev/null
+++ b/renderscript/clang-include/avx2intrin.h
@@ -0,0 +1,1201 @@
+/*===---- avx2intrin.h - AVX2 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx2intrin.h> directly; include <immintrin.h> instead."
+#endif
+
+/* SSE4 Multiple Packed Sums of Absolute Difference.  */
+#define _mm256_mpsadbw_epu8(X, Y, M) __builtin_ia32_mpsadbw256((X), (Y), (M))
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_abs_epi8(__m256i a)
+{
+    return (__m256i)__builtin_ia32_pabsb256((__v32qi)a);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_abs_epi16(__m256i a)
+{
+    return (__m256i)__builtin_ia32_pabsw256((__v16hi)a);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_abs_epi32(__m256i a)
+{
+    return (__m256i)__builtin_ia32_pabsd256((__v8si)a);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_packs_epi16(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_packsswb256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_packs_epi32(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_packssdw256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_packus_epi16(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_packuswb256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_packus_epi32(__m256i __V1, __m256i __V2)
+{
+  return (__m256i) __builtin_ia32_packusdw256((__v8si)__V1, (__v8si)__V2);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_add_epi8(__m256i a, __m256i b)
+{
+  return (__m256i)((__v32qi)a + (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_add_epi16(__m256i a, __m256i b)
+{
+  return (__m256i)((__v16hi)a + (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_add_epi32(__m256i a, __m256i b)
+{
+  return (__m256i)((__v8si)a + (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_add_epi64(__m256i a, __m256i b)
+{
+  return a + b;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_adds_epi8(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_paddsb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_adds_epi16(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_paddsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_adds_epu8(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_paddusb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_adds_epu16(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_paddusw256((__v16hi)a, (__v16hi)b);
+}
+
+#define _mm256_alignr_epi8(a, b, n) __extension__ ({ \
+  __m256i __a = (a); \
+  __m256i __b = (b); \
+  (__m256i)__builtin_ia32_palignr256((__v32qi)__a, (__v32qi)__b, (n)); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_and_si256(__m256i a, __m256i b)
+{
+  return a & b;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_andnot_si256(__m256i a, __m256i b)
+{
+  return ~a & b;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_avg_epu8(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_pavgb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_avg_epu16(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_pavgw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M)
+{
+  return (__m256i)__builtin_ia32_pblendvb256((__v32qi)__V1, (__v32qi)__V2,
+                                              (__v32qi)__M);
+}
+
+#define _mm256_blend_epi16(V1, V2, M) __extension__ ({ \
+  __m256i __V1 = (V1); \
+  __m256i __V2 = (V2); \
+  (__m256i)__builtin_ia32_pblendw256((__v16hi)__V1, (__v16hi)__V2, (M)); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpeq_epi8(__m256i a, __m256i b)
+{
+  return (__m256i)((__v32qi)a == (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpeq_epi16(__m256i a, __m256i b)
+{
+  return (__m256i)((__v16hi)a == (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpeq_epi32(__m256i a, __m256i b)
+{
+  return (__m256i)((__v8si)a == (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpeq_epi64(__m256i a, __m256i b)
+{
+  return (__m256i)(a == b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpgt_epi8(__m256i a, __m256i b)
+{
+  return (__m256i)((__v32qi)a > (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpgt_epi16(__m256i a, __m256i b)
+{
+  return (__m256i)((__v16hi)a > (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpgt_epi32(__m256i a, __m256i b)
+{
+  return (__m256i)((__v8si)a > (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpgt_epi64(__m256i a, __m256i b)
+{
+  return (__m256i)(a > b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_hadd_epi16(__m256i a, __m256i b)
+{
+    return (__m256i)__builtin_ia32_phaddw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_hadd_epi32(__m256i a, __m256i b)
+{
+    return (__m256i)__builtin_ia32_phaddd256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_hadds_epi16(__m256i a, __m256i b)
+{
+    return (__m256i)__builtin_ia32_phaddsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_hsub_epi16(__m256i a, __m256i b)
+{
+    return (__m256i)__builtin_ia32_phsubw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_hsub_epi32(__m256i a, __m256i b)
+{
+    return (__m256i)__builtin_ia32_phsubd256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_hsubs_epi16(__m256i a, __m256i b)
+{
+    return (__m256i)__builtin_ia32_phsubsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_maddubs_epi16(__m256i a, __m256i b)
+{
+    return (__m256i)__builtin_ia32_pmaddubsw256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_madd_epi16(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_pmaddwd256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_max_epi8(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_pmaxsb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_max_epi16(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_pmaxsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_max_epi32(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_pmaxsd256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_max_epu8(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_pmaxub256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_max_epu16(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_pmaxuw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_max_epu32(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_pmaxud256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_min_epi8(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_pminsb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_min_epi16(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_pminsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_min_epi32(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_pminsd256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_min_epu8(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_pminub256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_min_epu16(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_pminuw256 ((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_min_epu32(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_pminud256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm256_movemask_epi8(__m256i a)
+{
+  return __builtin_ia32_pmovmskb256((__v32qi)a);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi8_epi16(__m128i __V)
+{
+  return (__m256i)__builtin_ia32_pmovsxbw256((__v16qi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi8_epi32(__m128i __V)
+{
+  return (__m256i)__builtin_ia32_pmovsxbd256((__v16qi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi8_epi64(__m128i __V)
+{
+  return (__m256i)__builtin_ia32_pmovsxbq256((__v16qi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi16_epi32(__m128i __V)
+{
+  return (__m256i)__builtin_ia32_pmovsxwd256((__v8hi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi16_epi64(__m128i __V)
+{
+  return (__m256i)__builtin_ia32_pmovsxwq256((__v8hi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi32_epi64(__m128i __V)
+{
+  return (__m256i)__builtin_ia32_pmovsxdq256((__v4si)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepu8_epi16(__m128i __V)
+{
+  return (__m256i)__builtin_ia32_pmovzxbw256((__v16qi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepu8_epi32(__m128i __V)
+{
+  return (__m256i)__builtin_ia32_pmovzxbd256((__v16qi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepu8_epi64(__m128i __V)
+{
+  return (__m256i)__builtin_ia32_pmovzxbq256((__v16qi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepu16_epi32(__m128i __V)
+{
+  return (__m256i)__builtin_ia32_pmovzxwd256((__v8hi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepu16_epi64(__m128i __V)
+{
+  return (__m256i)__builtin_ia32_pmovzxwq256((__v8hi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepu32_epi64(__m128i __V)
+{
+  return (__m256i)__builtin_ia32_pmovzxdq256((__v4si)__V);
+}
+
+static __inline__  __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mul_epi32(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_pmuldq256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mulhrs_epi16(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mulhi_epu16(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_pmulhuw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mulhi_epi16(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_pmulhw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mullo_epi16(__m256i a, __m256i b)
+{
+  return (__m256i)((__v16hi)a * (__v16hi)b);
+}
+
+static __inline__  __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mullo_epi32 (__m256i a, __m256i b)
+{
+  return (__m256i)((__v8si)a * (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mul_epu32(__m256i a, __m256i b)
+{
+  return __builtin_ia32_pmuludq256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_or_si256(__m256i a, __m256i b)
+{
+  return a | b;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sad_epu8(__m256i a, __m256i b)
+{
+  return __builtin_ia32_psadbw256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_shuffle_epi8(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_pshufb256((__v32qi)a, (__v32qi)b);
+}
+
+#define _mm256_shuffle_epi32(a, imm) __extension__ ({ \
+  __m256i __a = (a); \
+  (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)_mm256_set1_epi32(0), \
+                                   (imm) & 0x3, ((imm) & 0xc) >> 2, \
+                                   ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
+                                   4 + (((imm) & 0x03) >> 0), \
+                                   4 + (((imm) & 0x0c) >> 2), \
+                                   4 + (((imm) & 0x30) >> 4), \
+                                   4 + (((imm) & 0xc0) >> 6)); })
+
+#define _mm256_shufflehi_epi16(a, imm) __extension__ ({ \
+  __m256i __a = (a); \
+  (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)_mm256_set1_epi16(0), \
+                                   0, 1, 2, 3, \
+                                   4 + (((imm) & 0x03) >> 0), \
+                                   4 + (((imm) & 0x0c) >> 2), \
+                                   4 + (((imm) & 0x30) >> 4), \
+                                   4 + (((imm) & 0xc0) >> 6), \
+                                   8, 9, 10, 11, \
+                                   12 + (((imm) & 0x03) >> 0), \
+                                   12 + (((imm) & 0x0c) >> 2), \
+                                   12 + (((imm) & 0x30) >> 4), \
+                                   12 + (((imm) & 0xc0) >> 6)); })
+
+#define _mm256_shufflelo_epi16(a, imm) __extension__ ({ \
+  __m256i __a = (a); \
+  (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)_mm256_set1_epi16(0), \
+                                   (imm) & 0x3,((imm) & 0xc) >> 2, \
+                                   ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
+                                   4, 5, 6, 7, \
+                                   8 + (((imm) & 0x03) >> 0), \
+                                   8 + (((imm) & 0x0c) >> 2), \
+                                   8 + (((imm) & 0x30) >> 4), \
+                                   8 + (((imm) & 0xc0) >> 6), \
+                                   12, 13, 14, 15); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sign_epi8(__m256i a, __m256i b)
+{
+    return (__m256i)__builtin_ia32_psignb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sign_epi16(__m256i a, __m256i b)
+{
+    return (__m256i)__builtin_ia32_psignw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sign_epi32(__m256i a, __m256i b)
+{
+    return (__m256i)__builtin_ia32_psignd256((__v8si)a, (__v8si)b);
+}
+
+#define _mm256_slli_si256(a, count) __extension__ ({ \
+  __m256i __a = (a); \
+  (__m256i)__builtin_ia32_pslldqi256(__a, (count)*8); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_slli_epi16(__m256i a, int count)
+{
+  return (__m256i)__builtin_ia32_psllwi256((__v16hi)a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sll_epi16(__m256i a, __m128i count)
+{
+  return (__m256i)__builtin_ia32_psllw256((__v16hi)a, (__v8hi)count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_slli_epi32(__m256i a, int count)
+{
+  return (__m256i)__builtin_ia32_pslldi256((__v8si)a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sll_epi32(__m256i a, __m128i count)
+{
+  return (__m256i)__builtin_ia32_pslld256((__v8si)a, (__v4si)count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_slli_epi64(__m256i a, int count)
+{
+  return __builtin_ia32_psllqi256(a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sll_epi64(__m256i a, __m128i count)
+{
+  return __builtin_ia32_psllq256(a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srai_epi16(__m256i a, int count)
+{
+  return (__m256i)__builtin_ia32_psrawi256((__v16hi)a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sra_epi16(__m256i a, __m128i count)
+{
+  return (__m256i)__builtin_ia32_psraw256((__v16hi)a, (__v8hi)count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srai_epi32(__m256i a, int count)
+{
+  return (__m256i)__builtin_ia32_psradi256((__v8si)a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sra_epi32(__m256i a, __m128i count)
+{
+  return (__m256i)__builtin_ia32_psrad256((__v8si)a, (__v4si)count);
+}
+
+#define _mm256_srli_si256(a, count) __extension__ ({ \
+  __m256i __a = (a); \
+  (__m256i)__builtin_ia32_psrldqi256(__a, (count)*8); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srli_epi16(__m256i a, int count)
+{
+  return (__m256i)__builtin_ia32_psrlwi256((__v16hi)a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srl_epi16(__m256i a, __m128i count)
+{
+  return (__m256i)__builtin_ia32_psrlw256((__v16hi)a, (__v8hi)count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srli_epi32(__m256i a, int count)
+{
+  return (__m256i)__builtin_ia32_psrldi256((__v8si)a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srl_epi32(__m256i a, __m128i count)
+{
+  return (__m256i)__builtin_ia32_psrld256((__v8si)a, (__v4si)count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srli_epi64(__m256i a, int count)
+{
+  return __builtin_ia32_psrlqi256(a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srl_epi64(__m256i a, __m128i count)
+{
+  return __builtin_ia32_psrlq256(a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sub_epi8(__m256i a, __m256i b)
+{
+  return (__m256i)((__v32qi)a - (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sub_epi16(__m256i a, __m256i b)
+{
+  return (__m256i)((__v16hi)a - (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sub_epi32(__m256i a, __m256i b)
+{
+  return (__m256i)((__v8si)a - (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sub_epi64(__m256i a, __m256i b)
+{
+  return a - b;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_subs_epi8(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_psubsb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_subs_epi16(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_psubsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_subs_epu8(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_psubusb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_subs_epu16(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_psubusw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpackhi_epi8(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_shufflevector((__v32qi)a, (__v32qi)b, 8, 32+8, 9, 32+9, 10, 32+10, 11, 32+11, 12, 32+12, 13, 32+13, 14, 32+14, 15, 32+15, 24, 32+24, 25, 32+25, 26, 32+26, 27, 32+27, 28, 32+28, 29, 32+29, 30, 32+30, 31, 32+31);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpackhi_epi16(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_shufflevector((__v16hi)a, (__v16hi)b, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpackhi_epi32(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_shufflevector((__v8si)a, (__v8si)b, 2, 8+2, 3, 8+3, 6, 8+6, 7, 8+7);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpackhi_epi64(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_shufflevector(a, b, 1, 4+1, 3, 4+3);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpacklo_epi8(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_shufflevector((__v32qi)a, (__v32qi)b, 0, 32+0, 1, 32+1, 2, 32+2, 3, 32+3, 4, 32+4, 5, 32+5, 6, 32+6, 7, 32+7, 16, 32+16, 17, 32+17, 18, 32+18, 19, 32+19, 20, 32+20, 21, 32+21, 22, 32+22, 23, 32+23);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpacklo_epi16(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_shufflevector((__v16hi)a, (__v16hi)b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpacklo_epi32(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_shufflevector((__v8si)a, (__v8si)b, 0, 8+0, 1, 8+1, 4, 8+4, 5, 8+5);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpacklo_epi64(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_shufflevector(a, b, 0, 4+0, 2, 4+2);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_xor_si256(__m256i a, __m256i b)
+{
+  return a ^ b;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_stream_load_si256(__m256i *__V)
+{
+  return (__m256i)__builtin_ia32_movntdqa256((__v4di *)__V);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_broadcastss_ps(__m128 __X)
+{
+  return (__m128)__builtin_ia32_vbroadcastss_ps((__v4sf)__X);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcastss_ps(__m128 __X)
+{
+  return (__m256)__builtin_ia32_vbroadcastss_ps256((__v4sf)__X);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcastsd_pd(__m128d __X)
+{
+  return (__m256d)__builtin_ia32_vbroadcastsd_pd256((__v2df)__X);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm_broadcastsi128_si256(__m128i const *a)
+{
+  return (__m256i)__builtin_ia32_vbroadcastsi256(a);
+}
+
+#define _mm_blend_epi32(V1, V2, M) __extension__ ({ \
+  __m128i __V1 = (V1); \
+  __m128i __V2 = (V2); \
+  (__m128i)__builtin_ia32_pblendd128((__v4si)__V1, (__v4si)__V2, (M)); })
+
+#define _mm256_blend_epi32(V1, V2, M) __extension__ ({ \
+  __m256i __V1 = (V1); \
+  __m256i __V2 = (V2); \
+  (__m256i)__builtin_ia32_pblendd256((__v8si)__V1, (__v8si)__V2, (M)); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcastb_epi8(__m128i __X)
+{
+  return (__m256i)__builtin_ia32_pbroadcastb256((__v16qi)__X);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcastw_epi16(__m128i __X)
+{
+  return (__m256i)__builtin_ia32_pbroadcastw256((__v8hi)__X);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcastd_epi32(__m128i __X)
+{
+  return (__m256i)__builtin_ia32_pbroadcastd256((__v4si)__X);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcastq_epi64(__m128i __X)
+{
+  return (__m256i)__builtin_ia32_pbroadcastq256(__X);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_broadcastb_epi8(__m128i __X)
+{
+  return (__m128i)__builtin_ia32_pbroadcastb128((__v16qi)__X);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_broadcastw_epi16(__m128i __X)
+{
+  return (__m128i)__builtin_ia32_pbroadcastw128((__v8hi)__X);
+}
+
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_broadcastd_epi32(__m128i __X)
+{
+  return (__m128i)__builtin_ia32_pbroadcastd128((__v4si)__X);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_broadcastq_epi64(__m128i __X)
+{
+  return (__m128i)__builtin_ia32_pbroadcastq128(__X);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_permutevar8x32_epi32(__m256i a, __m256i b)
+{
+  return (__m256i)__builtin_ia32_permvarsi256((__v8si)a, (__v8si)b);
+}
+
+#define _mm256_permute4x64_pd(V, M) __extension__ ({ \
+  __m256d __V = (V); \
+  (__m256d)__builtin_shufflevector((__v4df)__V, (__v4df) _mm256_setzero_pd(), \
+                                   (M) & 0x3, ((M) & 0xc) >> 2, \
+                                   ((M) & 0x30) >> 4, ((M) & 0xc0) >> 6); })
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_permutevar8x32_ps(__m256 a, __m256 b)
+{
+  return (__m256)__builtin_ia32_permvarsf256((__v8sf)a, (__v8sf)b);
+}
+
+#define _mm256_permute4x64_epi64(V, M) __extension__ ({ \
+  __m256i __V = (V); \
+  (__m256i)__builtin_shufflevector((__v4di)__V, (__v4di) _mm256_setzero_si256(), \
+                                   (M) & 0x3, ((M) & 0xc) >> 2, \
+                                   ((M) & 0x30) >> 4, ((M) & 0xc0) >> 6); })
+
+#define _mm256_permute2x128_si256(V1, V2, M) __extension__ ({ \
+  __m256i __V1 = (V1); \
+  __m256i __V2 = (V2); \
+  (__m256i)__builtin_ia32_permti256(__V1, __V2, (M)); })
+
+#define _mm256_extracti128_si256(A, O) __extension__ ({ \
+  __m256i __A = (A); \
+  (__m128i)__builtin_ia32_extract128i256(__A, (O)); })
+
+#define _mm256_inserti128_si256(V1, V2, O) __extension__ ({ \
+  __m256i __V1 = (V1); \
+  __m128i __V2 = (V2); \
+  (__m256i)__builtin_ia32_insert128i256(__V1, __V2, (O)); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_maskload_epi32(int const *__X, __m256i __M)
+{
+  return (__m256i)__builtin_ia32_maskloadd256((const __v8si *)__X, (__v8si)__M);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_maskload_epi64(long long const *__X, __m256i __M)
+{
+  return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, __M);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maskload_epi32(int const *__X, __m128i __M)
+{
+  return (__m128i)__builtin_ia32_maskloadd((const __v4si *)__X, (__v4si)__M);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maskload_epi64(long long const *__X, __m128i __M)
+{
+  return (__m128i)__builtin_ia32_maskloadq((const __v2di *)__X, (__v2di)__M);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm256_maskstore_epi32(int *__X, __m256i __M, __m256i __Y)
+{
+  __builtin_ia32_maskstored256((__v8si *)__X, (__v8si)__M, (__v8si)__Y);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm256_maskstore_epi64(long long *__X, __m256i __M, __m256i __Y)
+{
+  __builtin_ia32_maskstoreq256((__v4di *)__X, __M, __Y);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_maskstore_epi32(int *__X, __m128i __M, __m128i __Y)
+{
+  __builtin_ia32_maskstored((__v4si *)__X, (__v4si)__M, (__v4si)__Y);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_maskstore_epi64(long long *__X, __m128i __M, __m128i __Y)
+{
+  __builtin_ia32_maskstoreq(( __v2di *)__X, __M, __Y);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sllv_epi32(__m256i __X, __m256i __Y)
+{
+  return (__m256i)__builtin_ia32_psllv8si((__v8si)__X, (__v8si)__Y);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sllv_epi32(__m128i __X, __m128i __Y)
+{
+  return (__m128i)__builtin_ia32_psllv4si((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sllv_epi64(__m256i __X, __m256i __Y)
+{
+  return (__m256i)__builtin_ia32_psllv4di(__X, __Y);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sllv_epi64(__m128i __X, __m128i __Y)
+{
+  return (__m128i)__builtin_ia32_psllv2di(__X, __Y);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srav_epi32(__m256i __X, __m256i __Y)
+{
+  return (__m256i)__builtin_ia32_psrav8si((__v8si)__X, (__v8si)__Y);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srav_epi32(__m128i __X, __m128i __Y)
+{
+  return (__m128i)__builtin_ia32_psrav4si((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srlv_epi32(__m256i __X, __m256i __Y)
+{
+  return (__m256i)__builtin_ia32_psrlv8si((__v8si)__X, (__v8si)__Y);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srlv_epi32(__m128i __X, __m128i __Y)
+{
+  return (__m128i)__builtin_ia32_psrlv4si((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srlv_epi64(__m256i __X, __m256i __Y)
+{
+  return (__m256i)__builtin_ia32_psrlv4di(__X, __Y);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srlv_epi64(__m128i __X, __m128i __Y)
+{
+  return (__m128i)__builtin_ia32_psrlv2di(__X, __Y);
+}
+
+#define _mm_mask_i32gather_pd(a, m, i, mask, s) __extension__ ({ \
+  __m128d __a = (a); \
+  double const *__m = (m); \
+  __m128i __i = (i); \
+  __m128d __mask = (mask); \
+  (__m128d)__builtin_ia32_gatherd_pd((__v2df)__a, (const __v2df *)__m, \
+             (__v4si)__i, (__v2df)__mask, (s)); })
+
+#define _mm256_mask_i32gather_pd(a, m, i, mask, s) __extension__ ({ \
+  __m256d __a = (a); \
+  double const *__m = (m); \
+  __m128i __i = (i); \
+  __m256d __mask = (mask); \
+  (__m256d)__builtin_ia32_gatherd_pd256((__v4df)__a, (const __v4df *)__m, \
+             (__v4si)__i, (__v4df)__mask, (s)); })
+
+#define _mm_mask_i64gather_pd(a, m, i, mask, s) __extension__ ({ \
+  __m128d __a = (a); \
+  double const *__m = (m); \
+  __m128i __i = (i); \
+  __m128d __mask = (mask); \
+  (__m128d)__builtin_ia32_gatherq_pd((__v2df)__a, (const __v2df *)__m, \
+             (__v2di)__i, (__v2df)__mask, (s)); })
+
+#define _mm256_mask_i64gather_pd(a, m, i, mask, s) __extension__ ({ \
+  __m256d __a = (a); \
+  double const *__m = (m); \
+  __m256i __i = (i); \
+  __m256d __mask = (mask); \
+  (__m256d)__builtin_ia32_gatherq_pd256((__v4df)__a, (const __v4df *)__m, \
+             (__v4di)__i, (__v4df)__mask, (s)); })
+
+#define _mm_mask_i32gather_ps(a, m, i, mask, s) __extension__ ({ \
+  __m128 __a = (a); \
+  float const *__m = (m); \
+  __m128i __i = (i); \
+  __m128 __mask = (mask); \
+  (__m128)__builtin_ia32_gatherd_ps((__v4sf)__a, (const __v4sf *)__m, \
+            (__v4si)__i, (__v4sf)__mask, (s)); })
+
+#define _mm256_mask_i32gather_ps(a, m, i, mask, s) __extension__ ({ \
+  __m256 __a = (a); \
+  float const *__m = (m); \
+  __m256i __i = (i); \
+  __m256 __mask = (mask); \
+  (__m256)__builtin_ia32_gatherd_ps256((__v8sf)__a, (const __v8sf *)__m, \
+            (__v8si)__i, (__v8sf)__mask, (s)); })
+
+#define _mm_mask_i64gather_ps(a, m, i, mask, s) __extension__ ({ \
+  __m128 __a = (a); \
+  float const *__m = (m); \
+  __m128i __i = (i); \
+  __m128 __mask = (mask); \
+  (__m128)__builtin_ia32_gatherq_ps((__v4sf)__a, (const __v4sf *)__m, \
+            (__v2di)__i, (__v4sf)__mask, (s)); })
+
+#define _mm256_mask_i64gather_ps(a, m, i, mask, s) __extension__ ({ \
+  __m128 __a = (a); \
+  float const *__m = (m); \
+  __m256i __i = (i); \
+  __m128 __mask = (mask); \
+  (__m128)__builtin_ia32_gatherq_ps256((__v4sf)__a, (const __v4sf *)__m, \
+            (__v4di)__i, (__v4sf)__mask, (s)); })
+
+#define _mm_mask_i32gather_epi32(a, m, i, mask, s) __extension__ ({ \
+  __m128i __a = (a); \
+  int const *__m = (m); \
+  __m128i __i = (i); \
+  __m128i __mask = (mask); \
+  (__m128i)__builtin_ia32_gatherd_d((__v4si)__a, (const __v4si *)__m, \
+            (__v4si)__i, (__v4si)__mask, (s)); })
+
+#define _mm256_mask_i32gather_epi32(a, m, i, mask, s) __extension__ ({ \
+  __m256i __a = (a); \
+  int const *__m = (m); \
+  __m256i __i = (i); \
+  __m256i __mask = (mask); \
+  (__m256i)__builtin_ia32_gatherd_d256((__v8si)__a, (const __v8si *)__m, \
+            (__v8si)__i, (__v8si)__mask, (s)); })
+
+#define _mm_mask_i64gather_epi32(a, m, i, mask, s) __extension__ ({ \
+  __m128i __a = (a); \
+  int const *__m = (m); \
+  __m128i __i = (i); \
+  __m128i __mask = (mask); \
+  (__m128i)__builtin_ia32_gatherq_d((__v4si)__a, (const __v4si *)__m, \
+            (__v2di)__i, (__v4si)__mask, (s)); })
+
+#define _mm256_mask_i64gather_epi32(a, m, i, mask, s) __extension__ ({ \
+  __m128i __a = (a); \
+  int const *__m = (m); \
+  __m256i __i = (i); \
+  __m128i __mask = (mask); \
+  (__m128i)__builtin_ia32_gatherq_d256((__v4si)__a, (const __v4si *)__m, \
+            (__v4di)__i, (__v4si)__mask, (s)); })
+
+#define _mm_mask_i32gather_epi64(a, m, i, mask, s) __extension__ ({ \
+  __m128i __a = (a); \
+  int const *__m = (m); \
+  __m128i __i = (i); \
+  __m128i __mask = (mask); \
+  (__m128i)__builtin_ia32_gatherd_q((__v2di)__a, (const __v2di *)__m, \
+             (__v4si)__i, (__v2di)__mask, (s)); })
+
+#define _mm256_mask_i32gather_epi64(a, m, i, mask, s) __extension__ ({ \
+  __m256i __a = (a); \
+  int const *__m = (m); \
+  __m128i __i = (i); \
+  __m256i __mask = (mask); \
+  (__m256i)__builtin_ia32_gatherd_q256((__v4di)__a, (const __v4di *)__m, \
+             (__v4si)__i, (__v4di)__mask, (s)); })
+
+#define _mm_mask_i64gather_epi64(a, m, i, mask, s) __extension__ ({ \
+  __m128i __a = (a); \
+  int const *__m = (m); \
+  __m128i __i = (i); \
+  __m128i __mask = (mask); \
+  (__m128i)__builtin_ia32_gatherq_q((__v2di)__a, (const __v2di *)__m, \
+             (__v2di)__i, (__v2di)__mask, (s)); })
+
+#define _mm256_mask_i64gather_epi64(a, m, i, mask, s) __extension__ ({ \
+  __m256i __a = (a); \
+  int const *__m = (m); \
+  __m256i __i = (i); \
+  __m256i __mask = (mask); \
+  (__m256i)__builtin_ia32_gatherq_q256((__v4di)__a, (const __v4di *)__m, \
+             (__v4di)__i, (__v4di)__mask, (s)); })
+
+#define _mm_i32gather_pd(m, i, s) __extension__ ({ \
+  double const *__m = (m); \
+  __m128i __i = (i); \
+  (__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_setzero_pd(), \
+             (const __v2df *)__m, (__v4si)__i, \
+             (__v2df)_mm_set1_pd((double)(long long int)-1), (s)); })
+
+#define _mm256_i32gather_pd(m, i, s) __extension__ ({ \
+  double const *__m = (m); \
+  __m128i __i = (i); \
+  (__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_setzero_pd(), \
+             (const __v4df *)__m, (__v4si)__i, \
+             (__v4df)_mm256_set1_pd((double)(long long int)-1), (s)); })
+
+#define _mm_i64gather_pd(m, i, s) __extension__ ({ \
+  double const *__m = (m); \
+  __m128i __i = (i); \
+  (__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_setzero_pd(), \
+             (const __v2df *)__m, (__v2di)__i, \
+             (__v2df)_mm_set1_pd((double)(long long int)-1), (s)); })
+
+#define _mm256_i64gather_pd(m, i, s) __extension__ ({ \
+  double const *__m = (m); \
+  __m256i __i = (i); \
+  (__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_setzero_pd(), \
+             (const __v4df *)__m, (__v4di)__i, \
+             (__v4df)_mm256_set1_pd((double)(long long int)-1), (s)); })
+
+#define _mm_i32gather_ps(m, i, s) __extension__ ({ \
+  float const *__m = (m); \
+  __m128i __i = (i); \
+  (__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_setzero_ps(), \
+             (const __v4sf *)__m, (__v4si)__i, \
+             (__v4sf)_mm_set1_ps((float)(int)-1), (s)); })
+
+#define _mm256_i32gather_ps(m, i, s) __extension__ ({ \
+  float const *__m = (m); \
+  __m256i __i = (i); \
+  (__m256)__builtin_ia32_gatherd_ps256((__v8sf)_mm256_setzero_ps(), \
+             (const __v8sf *)__m, (__v8si)__i, \
+             (__v8sf)_mm256_set1_ps((float)(int)-1), (s)); })
+
+#define _mm_i64gather_ps(m, i, s) __extension__ ({ \
+  float const *__m = (m); \
+  __m128i __i = (i); \
+  (__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_setzero_ps(), \
+             (const __v4sf *)__m, (__v2di)__i, \
+             (__v4sf)_mm_set1_ps((float)(int)-1), (s)); })
+
+#define _mm256_i64gather_ps(m, i, s) __extension__ ({ \
+  float const *__m = (m); \
+  __m256i __i = (i); \
+  (__m128)__builtin_ia32_gatherq_ps256((__v4sf)_mm_setzero_ps(), \
+             (const __v4sf *)__m, (__v4di)__i, \
+             (__v4sf)_mm_set1_ps((float)(int)-1), (s)); })
+
+#define _mm_i32gather_epi32(m, i, s) __extension__ ({ \
+  int const *__m = (m); \
+  __m128i __i = (i); \
+  (__m128i)__builtin_ia32_gatherd_d((__v4si)_mm_setzero_si128(), \
+            (const __v4si *)__m, (__v4si)__i, \
+            (__v4si)_mm_set1_epi32(-1), (s)); })
+
+#define _mm256_i32gather_epi32(m, i, s) __extension__ ({ \
+  int const *__m = (m); \
+  __m256i __i = (i); \
+  (__m256i)__builtin_ia32_gatherd_d256((__v8si)_mm256_setzero_si256(), \
+            (const __v8si *)__m, (__v8si)__i, \
+            (__v8si)_mm256_set1_epi32(-1), (s)); })
+
+#define _mm_i64gather_epi32(m, i, s) __extension__ ({ \
+  int const *__m = (m); \
+  __m128i __i = (i); \
+  (__m128i)__builtin_ia32_gatherq_d((__v4si)_mm_setzero_si128(), \
+            (const __v4si *)__m, (__v2di)__i, \
+            (__v4si)_mm_set1_epi32(-1), (s)); })
+
+#define _mm256_i64gather_epi32(m, i, s) __extension__ ({ \
+  int const *__m = (m); \
+  __m256i __i = (i); \
+  (__m128i)__builtin_ia32_gatherq_d256((__v4si)_mm_setzero_si128(), \
+            (const __v4si *)__m, (__v4di)__i, \
+            (__v4si)_mm_set1_epi32(-1), (s)); })
+
+#define _mm_i32gather_epi64(m, i, s) __extension__ ({ \
+  int const *__m = (m); \
+  __m128i __i = (i); \
+  (__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_setzero_si128(), \
+             (const __v2di *)__m, (__v4si)__i, \
+             (__v2di)_mm_set1_epi64x(-1), (s)); })
+
+#define _mm256_i32gather_epi64(m, i, s) __extension__ ({ \
+  int const *__m = (m); \
+  __m128i __i = (i); \
+  (__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_setzero_si256(), \
+             (const __v4di *)__m, (__v4si)__i, \
+             (__v4di)_mm256_set1_epi64x(-1), (s)); })
+
+#define _mm_i64gather_epi64(m, i, s) __extension__ ({ \
+  int const *__m = (m); \
+  __m128i __i = (i); \
+  (__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_setzero_si128(), \
+             (const __v2di *)__m, (__v2di)__i, \
+             (__v2di)_mm_set1_epi64x(-1), (s)); })
+
+#define _mm256_i64gather_epi64(m, i, s) __extension__ ({ \
+  int const *__m = (m); \
+  __m256i __i = (i); \
+  (__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_setzero_si256(), \
+             (const __v4di *)__m, (__v4di)__i, \
+             (__v4di)_mm256_set1_epi64x(-1), (s)); })
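
Purely illustrative (not part of the patch; the wrapper name is hypothetical): the gather macros at the end of this header take a base pointer, a vector of indices, and a compile-time byte scale, with the non-masked forms passing an all-ones mask so every lane is loaded. A minimal sketch, assuming a clang build with -mavx2:

  #include <immintrin.h>

  /* Gather eight 32-bit ints: lane i of the result is base[idx lane i].
     The scale (4) is the element size in bytes and must be a constant. */
  __m256i gather8_i32(const int *base, __m256i idx)
  {
    return _mm256_i32gather_epi32(base, idx, 4);
  }
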
diff --git a/renderscript/clang-include/avxintrin.h b/renderscript/clang-include/avxintrin.h
index 0a0d2e4..ee7f835 100644
--- a/renderscript/clang-include/avxintrin.h
+++ b/renderscript/clang-include/avxintrin.h
@@ -145,17 +145,13 @@
   return (__m256)__builtin_ia32_rcpps256((__v8sf)a);
 }
 
-static __inline __m256d __attribute__((__always_inline__, __nodebug__))
-_mm256_round_pd(__m256d v, const int m)
-{
-  return (__m256d)__builtin_ia32_roundpd256((__v4df)v, m);
-}
+#define _mm256_round_pd(V, M) __extension__ ({ \
+    __m256d __V = (V); \
+    (__m256d)__builtin_ia32_roundpd256((__v4df)__V, (M)); })
 
-static __inline __m256 __attribute__((__always_inline__, __nodebug__))
-_mm256_round_ps(__m256 v, const int m)
-{
-  return (__m256)__builtin_ia32_roundps256((__v8sf)v, m);
-}
+#define _mm256_round_ps(V, M) __extension__ ({ \
+  __m256 __V = (V); \
+  (__m256)__builtin_ia32_roundps256((__v8sf)__V, (M)); })
 
 #define _mm256_ceil_pd(V)  _mm256_round_pd((V), _MM_FROUND_CEIL)
 #define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR)
@@ -262,60 +258,59 @@
 						  (__v8si)c);
 }
 
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
-_mm_permute_pd(__m128d a, const int c)
-{
-  return (__m128d)__builtin_ia32_vpermilpd((__v2df)a, c);
-}
+#define _mm_permute_pd(A, C) __extension__ ({ \
+  __m128d __A = (A); \
+  (__m128d)__builtin_shufflevector((__v2df)__A, (__v2df) _mm_setzero_pd(), \
+                                   (C) & 0x1, ((C) & 0x2) >> 1); })
 
-static __inline __m256d __attribute__((__always_inline__, __nodebug__))
-_mm256_permute_pd(__m256d a, const int c)
-{
-  return (__m256d)__builtin_ia32_vpermilpd256((__v4df)a, c);
-}
+#define _mm256_permute_pd(A, C) __extension__ ({ \
+  __m256d __A = (A); \
+  (__m256d)__builtin_shufflevector((__v4df)__A, (__v4df) _mm256_setzero_pd(), \
+                                   (C) & 0x1, ((C) & 0x2) >> 1, \
+                                   2 + (((C) & 0x4) >> 2), \
+                                   2 + (((C) & 0x8) >> 3)); })
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
-_mm_permute_ps(__m128 a, const int c)
-{
-  return (__m128)__builtin_ia32_vpermilps((__v4sf)a, c);
-}
+#define _mm_permute_ps(A, C) __extension__ ({ \
+  __m128 __A = (A); \
+  (__m128)__builtin_shufflevector((__v4sf)__A, (__v4sf) _mm_setzero_ps(), \
+                                   (C) & 0x3, ((C) & 0xc) >> 2, \
+                                   ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6); })
 
-static __inline __m256 __attribute__((__always_inline__, __nodebug__))
-_mm256_permute_ps(__m256 a, const int c)
-{
-  return (__m256)__builtin_ia32_vpermilps256((__v8sf)a, c);
-}
+#define _mm256_permute_ps(A, C) __extension__ ({ \
+  __m256 __A = (A); \
+  (__m256)__builtin_shufflevector((__v8sf)__A, (__v8sf) _mm256_setzero_ps(), \
+                                  (C) & 0x3, ((C) & 0xc) >> 2, \
+                                  ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6, \
+                                  4 + (((C) & 0x03) >> 0), \
+                                  4 + (((C) & 0x0c) >> 2), \
+                                  4 + (((C) & 0x30) >> 4), \
+                                  4 + (((C) & 0xc0) >> 6)); })
 
-static __inline __m256d __attribute__((__always_inline__, __nodebug__))
-_mm256_permute2f128_pd(__m256d a, __m256d b, const int c)
-{
-  return (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)a, (__v4df)b, c);
-}
+#define _mm256_permute2f128_pd(V1, V2, M) __extension__ ({ \
+  __m256d __V1 = (V1); \
+  __m256d __V2 = (V2); \
+  (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)__V1, (__v4df)__V2, (M)); })
 
-static __inline __m256 __attribute__((__always_inline__, __nodebug__))
-_mm256_permute2f128_ps(__m256 a, __m256 b, const int c)
-{
-  return (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)a, (__v8sf)b, c);
-}
+#define _mm256_permute2f128_ps(V1, V2, M) __extension__ ({ \
+  __m256 __V1 = (V1); \
+  __m256 __V2 = (V2); \
+  (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)__V1, (__v8sf)__V2, (M)); })
 
-static __inline __m256i __attribute__((__always_inline__, __nodebug__))
-_mm256_permute2f128_si256(__m256i a, __m256i b, const int c)
-{
-  return (__m256i)__builtin_ia32_vperm2f128_si256((__v8si)a, (__v8si)b, c);
-}
+#define _mm256_permute2f128_si256(V1, V2, M) __extension__ ({ \
+  __m256i __V1 = (V1); \
+  __m256i __V2 = (V2); \
+  (__m256i)__builtin_ia32_vperm2f128_si256((__v8si)__V1, (__v8si)__V2, (M)); })
 
 /* Vector Blend */
-static __inline __m256d __attribute__((__always_inline__, __nodebug__))
-_mm256_blend_pd(__m256d a, __m256d b, const int c)
-{
-  return (__m256d)__builtin_ia32_blendpd256((__v4df)a, (__v4df)b, c);
-}
+#define _mm256_blend_pd(V1, V2, M) __extension__ ({ \
+  __m256d __V1 = (V1); \
+  __m256d __V2 = (V2); \
+  (__m256d)__builtin_ia32_blendpd256((__v4df)__V1, (__v4df)__V2, (M)); })
 
-static __inline __m256 __attribute__((__always_inline__, __nodebug__))
-_mm256_blend_ps(__m256 a, __m256 b, const int c)
-{
-  return (__m256)__builtin_ia32_blendps256((__v8sf)a, (__v8sf)b, c);
-}
+#define _mm256_blend_ps(V1, V2, M) __extension__ ({ \
+  __m256 __V1 = (V1); \
+  __m256 __V2 = (V2); \
+  (__m256)__builtin_ia32_blendps256((__v8sf)__V1, (__v8sf)__V2, (M)); })
 
 static __inline __m256d __attribute__((__always_inline__, __nodebug__))
 _mm256_blendv_pd(__m256d a, __m256d b, __m256d c)
@@ -330,26 +325,29 @@
 }
 
 /* Vector Dot Product */
-static __inline __m256 __attribute__((__always_inline__, __nodebug__))
-_mm256_dp_ps(__m256 a, __m256 b, const int c)
-{
-  return (__m256)__builtin_ia32_dpps256((__v8sf)a, (__v8sf)b, c);
-}
+#define _mm256_dp_ps(V1, V2, M) __extension__ ({ \
+  __m256 __V1 = (V1); \
+  __m256 __V2 = (V2); \
+  (__m256)__builtin_ia32_dpps256((__v8sf)__V1, (__v8sf)__V2, (M)); })
 
 /* Vector shuffle */
-#define _mm256_shuffle_ps(a, b, mask) \
-        (__builtin_shufflevector((__v8sf)(a), (__v8sf)(b), \
+#define _mm256_shuffle_ps(a, b, mask) __extension__ ({ \
+        __m256 __a = (a); \
+        __m256 __b = (b); \
+        (__m256)__builtin_shufflevector((__v8sf)__a, (__v8sf)__b, \
         (mask) & 0x3,                ((mask) & 0xc) >> 2, \
         (((mask) & 0x30) >> 4) + 8,  (((mask) & 0xc0) >> 6) + 8, \
         ((mask) & 0x3) + 4,          (((mask) & 0xc) >> 2) + 4, \
-        (((mask) & 0x30) >> 4) + 12, (((mask) & 0xc0) >> 6) + 12))
+        (((mask) & 0x30) >> 4) + 12, (((mask) & 0xc0) >> 6) + 12); })
 
-#define _mm256_shuffle_pd(a, b, mask) \
-        (__builtin_shufflevector((__v4df)(a), (__v4df)(b), \
+#define _mm256_shuffle_pd(a, b, mask) __extension__ ({ \
+        __m256d __a = (a); \
+        __m256d __b = (b); \
+        (__m256d)__builtin_shufflevector((__v4df)__a, (__v4df)__b, \
         (mask) & 0x1, \
         (((mask) & 0x2) >> 1) + 4, \
         (((mask) & 0x4) >> 2) + 2, \
-        (((mask) & 0x8) >> 3) + 6))
+        (((mask) & 0x8) >> 3) + 6); })
 
 /* Compare */
 #define _CMP_EQ_OQ    0x00 /* Equal (ordered, non-signaling)  */
@@ -385,42 +383,48 @@
 #define _CMP_GT_OQ    0x1e /* Greater-than (ordered, non-signaling)  */
 #define _CMP_TRUE_US  0x1f /* True (unordered, signaling)  */
 
-#define _mm_cmp_pd(a, b, c) \
-  (__m128d)__builtin_ia32_cmppd((__v2df)(a), (__v2df)(b), (c))
+#define _mm_cmp_pd(a, b, c) __extension__ ({ \
+  __m128d __a = (a); \
+  __m128d __b = (b); \
+  (__m128d)__builtin_ia32_cmppd((__v2df)__a, (__v2df)__b, (c)); })
 
-#define _mm_cmp_ps(a, b, c) \
-  (__m128)__builtin_ia32_cmpps((__v4sf)(a), (__v4sf)(b), (c))
+#define _mm_cmp_ps(a, b, c) __extension__ ({ \
+  __m128 __a = (a); \
+  __m128 __b = (b); \
+  (__m128)__builtin_ia32_cmpps((__v4sf)__a, (__v4sf)__b, (c)); })
 
-#define _mm256_cmp_pd(a, b, c) \
-  (__m256d)__builtin_ia32_cmppd256((__v4df)(a), (__v4df)(b), (c))
+#define _mm256_cmp_pd(a, b, c) __extension__ ({ \
+  __m256d __a = (a); \
+  __m256d __b = (b); \
+  (__m256d)__builtin_ia32_cmppd256((__v4df)__a, (__v4df)__b, (c)); })
 
-#define _mm256_cmp_ps(a, b, c) \
-  (__m256)__builtin_ia32_cmpps256((__v8sf)(a), (__v8sf)(b), (c))
+#define _mm256_cmp_ps(a, b, c) __extension__ ({ \
+  __m256 __a = (a); \
+  __m256 __b = (b); \
+  (__m256)__builtin_ia32_cmpps256((__v8sf)__a, (__v8sf)__b, (c)); })
 
-#define _mm_cmp_sd(a, b, c) \
-  (__m128d)__builtin_ia32_cmpsd((__v2df)(a), (__v2df)(b), (c))
+#define _mm_cmp_sd(a, b, c) __extension__ ({ \
+  __m128d __a = (a); \
+  __m128d __b = (b); \
+  (__m128d)__builtin_ia32_cmpsd((__v2df)__a, (__v2df)__b, (c)); })
 
-#define _mm_cmp_ss(a, b, c) \
-  (__m128)__builtin_ia32_cmpss((__v4sf)(a), (__v4sf)(b), (c))
+#define _mm_cmp_ss(a, b, c) __extension__ ({ \
+  __m128 __a = (a); \
+  __m128 __b = (b); \
+  (__m128)__builtin_ia32_cmpss((__v4sf)__a, (__v4sf)__b, (c)); })
 
 /* Vector extract */
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
-_mm256_extractf128_pd(__m256d a, const int o)
-{
-  return (__m128d)__builtin_ia32_vextractf128_pd256((__v4df)a, o);
-}
+#define _mm256_extractf128_pd(A, O) __extension__ ({ \
+  __m256d __A = (A); \
+  (__m128d)__builtin_ia32_vextractf128_pd256((__v4df)__A, (O)); })
 
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
-_mm256_extractf128_ps(__m256 a, const int o)
-{
-  return (__m128)__builtin_ia32_vextractf128_ps256((__v8sf)a, o);
-}
+#define _mm256_extractf128_ps(A, O) __extension__ ({ \
+  __m256 __A = (A); \
+  (__m128)__builtin_ia32_vextractf128_ps256((__v8sf)__A, (O)); })
 
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
-_mm256_extractf128_si256(__m256i a, const int o)
-{
-  return (__m128i)__builtin_ia32_vextractf128_si256((__v8si)a, o);
-}
+#define _mm256_extractf128_si256(A, O) __extension__ ({ \
+  __m256i __A = (A); \
+  (__m128i)__builtin_ia32_vextractf128_si256((__v8si)__A, (O)); })
 
 static __inline int __attribute__((__always_inline__, __nodebug__))
 _mm256_extract_epi32(__m256i a, int const imm)
@@ -453,23 +457,20 @@
 #endif
 
 /* Vector insert */
-static __inline __m256d __attribute__((__always_inline__, __nodebug__))
-_mm256_insertf128_pd(__m256d a, __m128d b, const int o)
-{
-  return (__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)a, (__v2df)b, o);
-}
+#define _mm256_insertf128_pd(V1, V2, O) __extension__ ({ \
+  __m256d __V1 = (V1); \
+  __m128d __V2 = (V2); \
+  (__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)__V1, (__v2df)__V2, (O)); })
 
-static __inline __m256 __attribute__((__always_inline__, __nodebug__))
-_mm256_insertf128_ps(__m256 a, __m128 b, const int o)
-{
-  return (__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)a, (__v4sf)b, o);
-}
+#define _mm256_insertf128_ps(V1, V2, O) __extension__ ({ \
+  __m256 __V1 = (V1); \
+  __m128 __V2 = (V2); \
+  (__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)__V1, (__v4sf)__V2, (O)); })
 
-static __inline __m256i __attribute__((__always_inline__, __nodebug__))
-_mm256_insertf128_si256(__m256i a, __m128i b, const int o)
-{
-  return (__m256i)__builtin_ia32_vinsertf128_si256((__v8si)a, (__v4si)b, o);
-}
+#define _mm256_insertf128_si256(V1, V2, O) __extension__ ({ \
+  __m256i __V1 = (V1); \
+  __m128i __V2 = (V2); \
+  (__m256i)__builtin_ia32_vinsertf128_si256((__v8si)__V1, (__v4si)__V2, (O)); })
 
 static __inline __m256i __attribute__((__always_inline__, __nodebug__))
 _mm256_insert_epi32(__m256i a, int b, int const imm)
@@ -762,13 +763,19 @@
 static __inline __m256d __attribute__((__always_inline__, __nodebug__))
 _mm256_loadu_pd(double const *p)
 {
-  return (__m256d)__builtin_ia32_loadupd256(p);
+  struct __loadu_pd {
+    __m256d v;
+  } __attribute__((packed, may_alias));
+  return ((struct __loadu_pd*)p)->v;
 }
 
 static __inline __m256 __attribute__((__always_inline__, __nodebug__))
 _mm256_loadu_ps(float const *p)
 {
-  return (__m256)__builtin_ia32_loadups256(p);
+  struct __loadu_ps {
+    __m256 v;
+  } __attribute__((packed, may_alias));
+  return ((struct __loadu_ps*)p)->v;
 }
 
 static __inline __m256i __attribute__((__always_inline__, __nodebug__))
@@ -780,7 +787,10 @@
 static __inline __m256i __attribute__((__always_inline__, __nodebug__))
 _mm256_loadu_si256(__m256i const *p)
 {
-  return (__m256i)__builtin_ia32_loaddqu256((char const *)p);
+  struct __loadu_si256 {
+    __m256i v;
+  } __attribute__((packed, may_alias));
+  return ((struct __loadu_si256*)p)->v;
 }
 
 static __inline __m256i __attribute__((__always_inline__, __nodebug__))
@@ -1136,3 +1146,70 @@
   __m128i zero = _mm_setzero_si128();
   return __builtin_shufflevector(in, zero, 0, 1, 2, 2);
 }
+
+/* SIMD load ops (unaligned) */
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_loadu2_m128(float const *addr_hi, float const *addr_lo)
+{
+  struct __loadu_ps {
+    __m128 v;
+  } __attribute__((__packed__, __may_alias__));
+
+  __m256 v256 = _mm256_castps128_ps256(((struct __loadu_ps*)addr_lo)->v);
+  return _mm256_insertf128_ps(v256, ((struct __loadu_ps*)addr_hi)->v, 1);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_loadu2_m128d(double const *addr_hi, double const *addr_lo)
+{
+  struct __loadu_pd {
+    __m128d v;
+  } __attribute__((__packed__, __may_alias__));
+
+  __m256d v256 = _mm256_castpd128_pd256(((struct __loadu_pd*)addr_lo)->v);
+  return _mm256_insertf128_pd(v256, ((struct __loadu_pd*)addr_hi)->v, 1);
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_loadu2_m128i(__m128i const *addr_hi, __m128i const *addr_lo)
+{
+  struct __loadu_si128 {
+    __m128i v;
+  } __attribute__((packed, may_alias));
+  __m256i v256 = _mm256_castsi128_si256(((struct __loadu_si128*)addr_lo)->v);
+  return _mm256_insertf128_si256(v256, ((struct __loadu_si128*)addr_hi)->v, 1);
+}
+
+/* SIMD store ops (unaligned) */
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_storeu2_m128(float *addr_hi, float *addr_lo, __m256 a)
+{
+  __m128 v128;
+
+  v128 = _mm256_castps256_ps128(a);
+  __builtin_ia32_storeups(addr_lo, v128);
+  v128 = _mm256_extractf128_ps(a, 1);
+  __builtin_ia32_storeups(addr_hi, v128);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_storeu2_m128d(double *addr_hi, double *addr_lo, __m256d a)
+{
+  __m128d v128;
+
+  v128 = _mm256_castpd256_pd128(a);
+  __builtin_ia32_storeupd(addr_lo, v128);
+  v128 = _mm256_extractf128_pd(a, 1);
+  __builtin_ia32_storeupd(addr_hi, v128);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_storeu2_m128i(__m128i *addr_hi, __m128i *addr_lo, __m256i a)
+{
+  __m128i v128;
+
+  v128 = _mm256_castsi256_si128(a);
+  __builtin_ia32_storedqu((char *)addr_lo, (__v16qi)v128);
+  v128 = _mm256_extractf128_si256(a, 1);
+  __builtin_ia32_storedqu((char *)addr_hi, (__v16qi)v128);
+}
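
A note on the rewrite pattern in this file: the extract/insert intrinsics take an immediate operand, so turning the always-inline functions into __extension__ ({ ... }) statement-expression macros keeps the constant visible to the builtin while still evaluating each vector argument exactly once. The new loadu2/storeu2 helpers compose those macros with unaligned half-width loads and stores; a minimal usage sketch, assuming AVX is enabled (-mavx) and with illustrative names:

    #include <immintrin.h>

    void roundtrip(float *out_hi, float *out_lo)
    {
      float lo[4] = {0.f, 1.f, 2.f, 3.f};
      float hi[4] = {4.f, 5.f, 6.f, 7.f};
      /* Note the argument order: the high-half address comes first. */
      __m256 v = _mm256_loadu2_m128(hi, lo);     /* lanes 0-3 = lo, lanes 4-7 = hi */
      _mm256_storeu2_m128(out_hi, out_lo, v);    /* neither pointer needs alignment */
    }
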
diff --git a/renderscript/clang-include/bmi2intrin.h b/renderscript/clang-include/bmi2intrin.h
new file mode 100644
index 0000000..c60b0c4
--- /dev/null
+++ b/renderscript/clang-include/bmi2intrin.h
@@ -0,0 +1,75 @@
+/*===---- bmi2intrin.h - BMI2 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <bmi2intrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __BMI2__
+# error "BMI2 instruction set not enabled"
+#endif /* __BMI2__ */
+
+#ifndef __BMI2INTRIN_H
+#define __BMI2INTRIN_H
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_bzhi_u32(unsigned int __X, unsigned int __Y)
+{
+  return __builtin_ia32_bzhi_si(__X, __Y);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_pdep_u32(unsigned int __X, unsigned int __Y)
+{
+  return __builtin_ia32_pdep_si(__X, __Y);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_pext_u32(unsigned int __X, unsigned int __Y)
+{
+  return __builtin_ia32_pext_si(__X, __Y);
+}
+
+#ifdef  __x86_64__
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+_bzhi_u64(unsigned long long __X, unsigned long long __Y)
+{
+  return __builtin_ia32_bzhi_di(__X, __Y);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+_pdep_u64(unsigned long long __X, unsigned long long __Y)
+{
+  return __builtin_ia32_pdep_di(__X, __Y);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+_pext_u64(unsigned long long __X, unsigned long long __Y)
+{
+  return __builtin_ia32_pext_di(__X, __Y);
+}
+
+#endif /* !__x86_64__  */
+
+#endif /* __BMI2INTRIN_H */
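
For orientation, _pext_u32 gathers the bits of its first operand that fall under the mask into the low bits of the result, and _pdep_u32 scatters low bits back out to the mask positions. A worked sketch, assuming -mbmi2; the function name is illustrative:

    #include <x86intrin.h>

    unsigned high_nibble_demo(void)
    {
      unsigned x    = 0xB4;                /* 1011 0100 */
      unsigned mask = 0xF0;                /* 1111 0000 */
      unsigned nib  = _pext_u32(x, mask);  /* 0x0B: masked bits gathered into the low end */
      return _pdep_u32(nib, mask);         /* 0xB0: same bits scattered back under the mask */
    }
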
diff --git a/renderscript/clang-include/bmiintrin.h b/renderscript/clang-include/bmiintrin.h
new file mode 100644
index 0000000..8cb00f5
--- /dev/null
+++ b/renderscript/clang-include/bmiintrin.h
@@ -0,0 +1,115 @@
+/*===---- bmiintrin.h - BMI intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <bmiintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __BMI__
+# error "BMI instruction set not enabled"
+#endif /* __BMI__ */
+
+#ifndef __BMIINTRIN_H
+#define __BMIINTRIN_H
+
+static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
+__tzcnt_u16(unsigned short __X)
+{
+  return __builtin_ctzs(__X);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__andn_u32(unsigned int __X, unsigned int __Y)
+{
+  return ~__X & __Y;
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__bextr_u32(unsigned int __X, unsigned int __Y)
+{
+  return __builtin_ia32_bextr_u32(__X, __Y);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__blsi_u32(unsigned int __X)
+{
+  return __X & -__X;
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__blsmsk_u32(unsigned int __X)
+{
+  return __X ^ (__X - 1);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__blsr_u32(unsigned int __X)
+{
+  return __X & (__X - 1);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__tzcnt_u32(unsigned int __X)
+{
+  return __builtin_ctz(__X);
+}
+
+#ifdef __x86_64__
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__andn_u64 (unsigned long long __X, unsigned long long __Y)
+{
+  return ~__X & __Y;
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__bextr_u64(unsigned long long __X, unsigned long long __Y)
+{
+  return __builtin_ia32_bextr_u64(__X, __Y);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__blsi_u64(unsigned long long __X)
+{
+  return __X & -__X;
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__blsmsk_u64(unsigned long long __X)
+{
+  return __X ^ (__X - 1);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__blsr_u64(unsigned long long __X)
+{
+  return __X & (__X - 1);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__tzcnt_u64(unsigned long long __X)
+{
+  return __builtin_ctzll(__X);
+}
+#endif
+
+#endif /* __BMIINTRIN_H */
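
The __bls* fallbacks above are the standard lowest-set-bit identities, so they can be checked by hand. Worked on one value, assuming -mbmi (names illustrative):

    #include <x86intrin.h>

    void bls_demo(void)
    {
      unsigned x = 0x68;               /* 0110 1000 */
      unsigned i = __blsi_u32(x);      /* 0x08: isolate lowest set bit,      x & -x     */
      unsigned m = __blsmsk_u32(x);    /* 0x0F: mask through lowest set bit, x ^ (x-1)  */
      unsigned r = __blsr_u32(x);      /* 0x60: clear lowest set bit,        x & (x-1)  */
      (void)i; (void)m; (void)r;
    }
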
diff --git a/renderscript/clang-include/cpuid.h b/renderscript/clang-include/cpuid.h
new file mode 100644
index 0000000..05c293f
--- /dev/null
+++ b/renderscript/clang-include/cpuid.h
@@ -0,0 +1,33 @@
+/*===---- cpuid.h - X86 cpu model detection --------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !(__x86_64__ || __i386__)
+#error this header is for x86 only
+#endif
+
+static inline int __get_cpuid (unsigned int level, unsigned int *eax,
+                               unsigned int *ebx, unsigned int *ecx,
+                               unsigned int *edx) {
+    asm("cpuid" : "=a"(*eax), "=b" (*ebx), "=c"(*ecx), "=d"(*edx) : "0"(level));
+    return 1;
+}
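
This __get_cpuid is deliberately minimal: it always returns 1 and, unlike gcc's fuller cpuid.h, does not check the requested leaf against the maximum supported level. A usage sketch; leaf 0 returns the vendor string in ebx/edx/ecx order (function and buffer names illustrative):

    #include <cpuid.h>
    #include <string.h>

    void read_vendor(char vendor[13])
    {
      unsigned eax, ebx, ecx, edx;
      __get_cpuid(0, &eax, &ebx, &ecx, &edx);
      memcpy(vendor + 0, &ebx, 4);
      memcpy(vendor + 4, &edx, 4);
      memcpy(vendor + 8, &ecx, 4);
      vendor[12] = '\0';               /* e.g. "GenuineIntel" or "AuthenticAMD" */
    }
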
diff --git a/renderscript/clang-include/emmintrin.h b/renderscript/clang-include/emmintrin.h
index 903cfde..91395ed 100644
--- a/renderscript/clang-include/emmintrin.h
+++ b/renderscript/clang-include/emmintrin.h
@@ -821,8 +821,9 @@
   return a ^ b;
 }
 
-#define _mm_slli_si128(VEC, IMM) \
-  ((__m128i)__builtin_ia32_pslldqi128((__m128i)(VEC), (IMM)*8))
+#define _mm_slli_si128(a, count) __extension__ ({ \
+  __m128i __a = (a); \
+  (__m128i)__builtin_ia32_pslldqi128(__a, (count)*8); })
 
 static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
 _mm_slli_epi16(__m128i a, int count)
@@ -885,8 +886,9 @@
 }
 
 
-#define _mm_srli_si128(VEC, IMM) \
-  ((__m128i)__builtin_ia32_psrldqi128((__m128i)(VEC), (IMM)*8))
+#define _mm_srli_si128(a, count) __extension__ ({ \
+  __m128i __a = (a); \
+  (__m128i)__builtin_ia32_psrldqi128(__a, (count)*8); })
 
 static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
 _mm_srli_epi16(__m128i a, int count)
@@ -945,7 +947,10 @@
 static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
 _mm_cmpgt_epi8(__m128i a, __m128i b)
 {
-  return (__m128i)((__v16qi)a > (__v16qi)b);
+  /* This function always performs a signed comparison, but __v16qi may be
+     signed or unsigned (it is a vector of plain char), so use __v16qs. */
+  typedef signed char __v16qs __attribute__((__vector_size__(16)));
+  return (__m128i)((__v16qs)a > (__v16qs)b);
 }
 
 static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
@@ -1181,7 +1186,10 @@
 static __inline__ void __attribute__((__always_inline__, __nodebug__))
 _mm_storel_epi64(__m128i *p, __m128i a)
 {
-  __builtin_ia32_storelv4si((__v2si *)p, a);
+  struct __mm_storel_epi64_struct {
+    long long u;
+  } __attribute__((__packed__, __may_alias__));
+  ((struct __mm_storel_epi64_struct*)p)->u = a[0];
 }
 
 static __inline__ void __attribute__((__always_inline__, __nodebug__))
@@ -1259,23 +1267,27 @@
   return __builtin_ia32_pmovmskb128((__v16qi)a);
 }
 
-#define _mm_shuffle_epi32(a, imm) \
-  ((__m128i)__builtin_shufflevector((__v4si)(a), (__v4si) _mm_set1_epi32(0), \
-                                    (imm) & 0x3, ((imm) & 0xc) >> 2, \
-                                    ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6))
+#define _mm_shuffle_epi32(a, imm) __extension__ ({ \
+  __m128i __a = (a); \
+  (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si) _mm_set1_epi32(0), \
+                                   (imm) & 0x3, ((imm) & 0xc) >> 2, \
+                                   ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6); })
 
+#define _mm_shufflelo_epi16(a, imm) __extension__ ({ \
+  __m128i __a = (a); \
+  (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi) _mm_set1_epi16(0), \
+                                   (imm) & 0x3, ((imm) & 0xc) >> 2, \
+                                   ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
+                                   4, 5, 6, 7); })
 
-#define _mm_shufflelo_epi16(a, imm) \
-  ((__m128i)__builtin_shufflevector((__v8hi)(a), (__v8hi) _mm_set1_epi16(0), \
-                                    (imm) & 0x3, ((imm) & 0xc) >> 2, \
-                                    ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
-                                    4, 5, 6, 7))
-#define _mm_shufflehi_epi16(a, imm) \
-  ((__m128i)__builtin_shufflevector((__v8hi)(a), (__v8hi) _mm_set1_epi16(0), 0, 1, 2, 3, \
-                                    4 + (((imm) & 0x03) >> 0), \
-                                    4 + (((imm) & 0x0c) >> 2), \
-                                    4 + (((imm) & 0x30) >> 4), \
-                                    4 + (((imm) & 0xc0) >> 6)))
+#define _mm_shufflehi_epi16(a, imm) __extension__ ({ \
+  __m128i __a = (a); \
+  (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi) _mm_set1_epi16(0), \
+                                   0, 1, 2, 3, \
+                                   4 + (((imm) & 0x03) >> 0), \
+                                   4 + (((imm) & 0x0c) >> 2), \
+                                   4 + (((imm) & 0x30) >> 4), \
+                                   4 + (((imm) & 0xc0) >> 6)); })
 
 static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
 _mm_unpackhi_epi8(__m128i a, __m128i b)
@@ -1361,9 +1373,10 @@
   return __builtin_ia32_movmskpd(a);
 }
 
-#define _mm_shuffle_pd(a, b, i) \
-  (__builtin_shufflevector((__m128d)(a), (__m128d)(b), (i) & 1, \
-                                                       (((i) & 2) >> 1) + 2))
+#define _mm_shuffle_pd(a, b, i) __extension__ ({ \
+  __m128d __a = (a); \
+  __m128d __b = (b); \
+  __builtin_shufflevector(__a, __b, (i) & 1, (((i) & 2) >> 1) + 2); })
 
 static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
 _mm_castpd_ps(__m128d in)
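
Two of the emmintrin.h changes reward a second look. The __v16qs typedef makes _mm_cmpgt_epi8 compare as signed bytes even under -funsigned-char, and the shuffle macros keep the usual immediate encoding in which each 2-bit field selects a source dword. For example 0x1B, i.e. _MM_SHUFFLE(0,1,2,3), decodes to fields 3,2,1,0 and reverses the vector; a sketch assuming -msse2:

    #include <emmintrin.h>

    static __m128i reverse_dwords(void)            /* illustrative */
    {
      __m128i v = _mm_set_epi32(33, 22, 11, 0);    /* elements: v[0]=0 ... v[3]=33 */
      return _mm_shuffle_epi32(v, 0x1B);           /* picks 3,2,1,0: r[0]=33 ... r[3]=0 */
    }
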
diff --git a/renderscript/clang-include/float.h b/renderscript/clang-include/float.h
index b7cb73a..2cb13d3 100644
--- a/renderscript/clang-include/float.h
+++ b/renderscript/clang-include/float.h
@@ -28,7 +28,7 @@
  * additional definitions provided for Windows.
  * For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx
  */
-#if defined(__MINGW32__) && \
+#if (defined(__MINGW32__) || defined(_MSC_VER)) && \
     defined(__has_include_next) && __has_include_next(<float.h>)
 #  include_next <float.h>
 
@@ -64,6 +64,11 @@
 #  undef FLT_MIN
 #  undef DBL_MIN
 #  undef LDBL_MIN
+#  if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__)
+#    undef FLT_TRUE_MIN
+#    undef DBL_TRUE_MIN
+#    undef LDBL_TRUE_MIN
+#  endif
 #endif
 
 /* Characteristics of floating point types, C99 5.2.4.2.2 */
@@ -110,4 +115,10 @@
 #define DBL_MIN __DBL_MIN__
 #define LDBL_MIN __LDBL_MIN__
 
+#if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__)
+#  define FLT_TRUE_MIN __FLT_DENORM_MIN__
+#  define DBL_TRUE_MIN __DBL_DENORM_MIN__
+#  define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
+#endif
+
 #endif /* __FLOAT_H */
diff --git a/renderscript/clang-include/fma4intrin.h b/renderscript/clang-include/fma4intrin.h
new file mode 100644
index 0000000..c30920d
--- /dev/null
+++ b/renderscript/clang-include/fma4intrin.h
@@ -0,0 +1,231 @@
+/*===---- fma4intrin.h - FMA4 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <fma4intrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __FMA4INTRIN_H
+#define __FMA4INTRIN_H
+
+#ifndef __FMA4__
+# error "FMA4 instruction set is not enabled"
+#else
+
+#include <pmmintrin.h>
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_macc_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_macc_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmaddpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_macc_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmaddss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_macc_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmaddsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_msub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_msub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_msub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmsubss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_msub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmsubsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_nmacc_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfnmaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_nmacc_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfnmaddpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_nmacc_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfnmaddss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_nmacc_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfnmaddsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_nmsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfnmsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_nmsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfnmsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_nmsub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfnmsubss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_nmsub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfnmsubsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_maddsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmaddsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_maddsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmaddsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_msubadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmsubaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_msubadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmsubaddpd(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_macc_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+  return (__m256)__builtin_ia32_vfmaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_macc_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+  return (__m256d)__builtin_ia32_vfmaddpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_msub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+  return (__m256)__builtin_ia32_vfmsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_msub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+  return (__m256d)__builtin_ia32_vfmsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_nmacc_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+  return (__m256)__builtin_ia32_vfnmaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_nmacc_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+  return (__m256d)__builtin_ia32_vfnmaddpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_nmsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+  return (__m256)__builtin_ia32_vfnmsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_nmsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+  return (__m256d)__builtin_ia32_vfnmsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_maddsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+  return (__m256)__builtin_ia32_vfmaddsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_maddsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+  return (__m256d)__builtin_ia32_vfmaddsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_msubadd_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+  return (__m256)__builtin_ia32_vfmsubaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_msubadd_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+  return (__m256d)__builtin_ia32_vfmsubaddpd256(__A, __B, __C);
+}
+
+#endif /* __FMA4__ */
+
+#endif /* __FMA4INTRIN_H */
diff --git a/renderscript/clang-include/fmaintrin.h b/renderscript/clang-include/fmaintrin.h
new file mode 100644
index 0000000..6bfd5a8
--- /dev/null
+++ b/renderscript/clang-include/fmaintrin.h
@@ -0,0 +1,229 @@
+/*===---- fmaintrin.h - FMA intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <fmaintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __FMAINTRIN_H
+#define __FMAINTRIN_H
+
+#ifndef __FMA__
+# error "FMA instruction set is not enabled"
+#else
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmaddpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmaddss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmaddsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fmsub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmsubss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmsubsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfnmaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfnmaddpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fnmadd_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfnmaddss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfnmaddsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfnmsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfnmsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fnmsub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfnmsubss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_fnmsub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfnmsubsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmaddsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmaddsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmsubaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmsubaddpd(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+  return (__m256)__builtin_ia32_vfmaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+  return (__m256d)__builtin_ia32_vfmaddpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+  return (__m256)__builtin_ia32_vfmsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+  return (__m256d)__builtin_ia32_vfmsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+  return (__m256)__builtin_ia32_vfnmaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+  return (__m256d)__builtin_ia32_vfnmaddpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+  return (__m256)__builtin_ia32_vfnmsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+  return (__m256d)__builtin_ia32_vfnmsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+  return (__m256)__builtin_ia32_vfmaddsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+  return (__m256d)__builtin_ia32_vfmaddsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+  return (__m256)__builtin_ia32_vfmsubaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+  return (__m256d)__builtin_ia32_vfmsubaddpd256(__A, __B, __C);
+}
+
+#endif /* __FMA__ */
+
+#endif /* __FMAINTRIN_H */
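
All of these lower to fused operations: the multiply and the add see a single rounding, which is the point of FMA over a separate mul/add pair. The sign conventions, assuming -mfma (function name illustrative):

    #include <immintrin.h>

    void fma_signs(void)
    {
      __m128 a = _mm_set1_ps(1.5f), b = _mm_set1_ps(2.0f), c = _mm_set1_ps(0.25f);
      __m128 r = _mm_fmadd_ps(a, b, c);    /* each lane:  (1.5 * 2.0) + 0.25 =  3.25 */
      __m128 s = _mm_fmsub_ps(a, b, c);    /* each lane:  (1.5 * 2.0) - 0.25 =  2.75 */
      __m128 t = _mm_fnmadd_ps(a, b, c);   /* each lane: -(1.5 * 2.0) + 0.25 = -2.75 */
      (void)r; (void)s; (void)t;
    }
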
diff --git a/renderscript/clang-include/immintrin.h b/renderscript/clang-include/immintrin.h
index a19deaa..15b65f3 100644
--- a/renderscript/clang-include/immintrin.h
+++ b/renderscript/clang-include/immintrin.h
@@ -48,7 +48,7 @@
 #include <smmintrin.h>
 #endif
 
-#if defined (__AES__) || defined (__PCLMUL__)
+#if defined (__AES__)
 #include <wmmintrin.h>
 #endif
 
@@ -56,4 +56,46 @@
 #include <avxintrin.h>
 #endif
 
+#ifdef __AVX2__
+#include <avx2intrin.h>
+#endif
+
+#ifdef __BMI__
+#include <bmiintrin.h>
+#endif
+
+#ifdef __BMI2__
+#include <bmi2intrin.h>
+#endif
+
+#ifdef __LZCNT__
+#include <lzcntintrin.h>
+#endif
+
+#ifdef __FMA__
+#include <fmaintrin.h>
+#endif
+
+#ifdef __RDRND__
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_rdrand16_step(unsigned short *__p)
+{
+  return __builtin_ia32_rdrand16_step(__p);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_rdrand32_step(unsigned int *__p)
+{
+  return __builtin_ia32_rdrand32_step(__p);
+}
+
+#ifdef __x86_64__
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_rdrand64_step(unsigned long long *__p)
+{
+  return __builtin_ia32_rdrand64_step(__p);
+}
+#endif
+#endif /* __RDRND__ */
+
 #endif /* __IMMINTRIN_H */
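
_rdrand32_step writes through its pointer and returns 1 on success, 0 when the hardware had no random value ready, so callers are expected to retry. A hedged sketch, assuming -mrdrnd; the retry count is arbitrary:

    #include <immintrin.h>

    int get_random_u32(unsigned int *out)   /* illustrative; returns 1 on success */
    {
      int tries = 10;
      while (tries--)
        if (_rdrand32_step(out))
          return 1;
      return 0;   /* caller should fall back to another entropy source */
    }
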
diff --git a/renderscript/clang-include/lzcntintrin.h b/renderscript/clang-include/lzcntintrin.h
new file mode 100644
index 0000000..62ab5ca
--- /dev/null
+++ b/renderscript/clang-include/lzcntintrin.h
@@ -0,0 +1,55 @@
+/*===---- lzcntintrin.h - LZCNT intrinsics ---------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <lzcntintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __LZCNT__
+# error "LZCNT instruction is not enabled"
+#endif /* __LZCNT__ */
+
+#ifndef __LZCNTINTRIN_H
+#define __LZCNTINTRIN_H
+
+static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
+__lzcnt16(unsigned short __X)
+{
+  return __builtin_clzs(__X);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__lzcnt32(unsigned int __X)
+{
+  return __builtin_clz(__X);
+}
+
+#ifdef __x86_64__
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__lzcnt64(unsigned long long __X)
+{
+  return __builtin_clzll(__X);
+}
+#endif
+
+#endif /* __LZCNTINTRIN_H */
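
Quick sanity values for the leading-zero counts. Note these wrappers forward to __builtin_clz*, which is undefined for a zero argument at the language level, so guard against 0. Assuming -mlzcnt:

    #include <x86intrin.h>

    void lzcnt_demo(void)                      /* illustrative */
    {
      unsigned a = __lzcnt32(0x00010000u);     /* 15: bit 16 is the highest set bit */
      unsigned b = __lzcnt16(1);               /* 15: counted within 16 bits */
      (void)a; (void)b;
    }
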
diff --git a/renderscript/clang-include/mm3dnow.h b/renderscript/clang-include/mm3dnow.h
index 2f456ad..d5236f8 100644
--- a/renderscript/clang-include/mm3dnow.h
+++ b/renderscript/clang-include/mm3dnow.h
@@ -105,7 +105,7 @@
 
 static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
 _m_pfrsqrtit1(__m64 __m1, __m64 __m2) {
-  return (__m64)__builtin_ia32_pfrsqrtit1((__v2sf)__m1, (__v2sf)__m2);
+  return (__m64)__builtin_ia32_pfrsqit1((__v2sf)__m1, (__v2sf)__m2);
 }
 
 static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
diff --git a/renderscript/clang-include/module.map b/renderscript/clang-include/module.map
new file mode 100644
index 0000000..418ba50
--- /dev/null
+++ b/renderscript/clang-include/module.map
@@ -0,0 +1,108 @@
+module _Builtin_intrinsics [system] {
+  explicit module altivec {
+    requires altivec
+    header "altivec.h"
+  }
+
+  explicit module intel {
+    requires x86
+    export *
+
+    header "immintrin.h"
+    header "x86intrin.h"
+
+    explicit module mm_malloc {
+      header "mm_malloc.h"
+      export * // note: for <stdlib.h> dependency
+    }
+
+    explicit module cpuid {
+      header "cpuid.h"
+    }
+
+    explicit module mmx {
+      requires mmx
+      header "mmintrin.h"
+    }
+
+    explicit module sse {
+      requires sse
+      export mmx
+      export * // note: for hackish <emmintrin.h> dependency
+      header "xmmintrin.h"
+    }
+
+    explicit module sse2 {
+      requires sse2
+      export sse
+      header "emmintrin.h"
+    }
+
+    explicit module sse3 {
+      requires sse3
+      export sse2
+      header "pmmintrin.h"
+    }
+
+    explicit module ssse3 {
+      requires ssse3
+      export sse3
+      header "tmmintrin.h"
+    }
+
+    explicit module sse4_1 {
+      requires sse41
+      export ssse3
+      header "smmintrin.h"
+    }
+
+    explicit module sse4_2 {
+      requires sse42
+      export sse4_1
+      header "nmmintrin.h"
+    }
+
+    explicit module avx {
+      requires avx
+      export sse4_2
+      header "avxintrin.h"
+    }
+
+    explicit module avx2 {
+      requires avx2
+      export avx
+      header "avx2intrin.h"
+    }
+
+    explicit module bmi {
+      requires bmi
+      header "bmiintrin.h"
+    }
+
+    explicit module bmi2 {
+      requires bmi2
+      header "bmi2intrin.h"
+    }
+
+    explicit module fma4 {
+      requires fma4
+      export sse3
+      header "fma4intrin.h"
+    }
+
+    explicit module lzcnt {
+      requires lzcnt
+      header "lzcntintrin.h"
+    }
+
+    explicit module popcnt {
+      requires popcnt
+      header "popcntintrin.h"
+    }
+
+    explicit module mm3dnow {
+      requires mm3dnow
+      header "mm3dnow.h"
+    }
+  }
+}
diff --git a/renderscript/clang-include/popcntintrin.h b/renderscript/clang-include/popcntintrin.h
new file mode 100644
index 0000000..d439daa
--- /dev/null
+++ b/renderscript/clang-include/popcntintrin.h
@@ -0,0 +1,45 @@
+/*===---- popcntintrin.h - POPCNT intrinsics -------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __POPCNT__
+#error "POPCNT instruction set not enabled"
+#endif
+
+#ifndef _POPCNTINTRIN_H
+#define _POPCNTINTRIN_H
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_popcnt_u32(unsigned int __A)
+{
+  return __builtin_popcount(__A);
+}
+
+#ifdef __x86_64__
+static __inline__ long long __attribute__((__always_inline__, __nodebug__))
+_mm_popcnt_u64(unsigned long long __A)
+{
+  return __builtin_popcountll(__A);
+}
+#endif /* __x86_64__ */
+
+#endif /* _POPCNTINTRIN_H */
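
The wrappers are exactly the popcount builtins; for instance, assuming -mpopcnt:

    #include <x86intrin.h>

    int popcnt_demo(void)                     /* illustrative */
    {
      return _mm_popcnt_u32(0x0000F00Fu);     /* 8: two nibbles of four set bits */
    }
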
diff --git a/renderscript/clang-include/smmintrin.h b/renderscript/clang-include/smmintrin.h
index 2b8b321..2fab50e 100644
--- a/renderscript/clang-include/smmintrin.h
+++ b/renderscript/clang-include/smmintrin.h
@@ -57,23 +57,34 @@
 #define _mm_floor_ss(X, Y)   _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
 #define _mm_floor_sd(X, Y)   _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
 
-#define _mm_round_ps(X, Y)      __builtin_ia32_roundps((X), (Y))
-#define _mm_round_ss(X, Y, M)   __builtin_ia32_roundss((X), (Y), (M))
-#define _mm_round_pd(X, M)      __builtin_ia32_roundpd((X), (M))
-#define _mm_round_sd(X, Y, M)   __builtin_ia32_roundsd((X), (Y), (M))
+#define _mm_round_ps(X, M) __extension__ ({ \
+  __m128 __X = (X); \
+  (__m128) __builtin_ia32_roundps((__v4sf)__X, (M)); })
+
+#define _mm_round_ss(X, Y, M) __extension__ ({ \
+  __m128 __X = (X); \
+  __m128 __Y = (Y); \
+  (__m128) __builtin_ia32_roundss((__v4sf)__X, (__v4sf)__Y, (M)); })
+
+#define _mm_round_pd(X, M) __extension__ ({ \
+  __m128d __X = (X); \
+  (__m128d) __builtin_ia32_roundpd((__v2df)__X, (M)); })
+
+#define _mm_round_sd(X, Y, M) __extension__ ({ \
+  __m128d __X = (X); \
+  __m128d __Y = (Y); \
+  (__m128d) __builtin_ia32_roundsd((__v2df)__X, (__v2df)__Y, (M)); })
 
 /* SSE4 Packed Blending Intrinsics.  */
-static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
-_mm_blend_pd (__m128d __V1, __m128d __V2, const int __M)
-{
-  return (__m128d) __builtin_ia32_blendpd ((__v2df)__V1, (__v2df)__V2, __M);
-}
+#define _mm_blend_pd(V1, V2, M) __extension__ ({ \
+  __m128d __V1 = (V1); \
+  __m128d __V2 = (V2); \
+  (__m128d) __builtin_ia32_blendpd ((__v2df)__V1, (__v2df)__V2, (M)); })
 
-static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
-_mm_blend_ps (__m128 __V1, __m128 __V2, const int __M)
-{
-  return (__m128) __builtin_ia32_blendps ((__v4sf)__V1, (__v4sf)__V2, __M);
-}
+#define _mm_blend_ps(V1, V2, M) __extension__ ({ \
+  __m128 __V1 = (V1); \
+  __m128 __V2 = (V2); \
+  (__m128) __builtin_ia32_blendps ((__v4sf)__V1, (__v4sf)__V2, (M)); })
 
 static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
 _mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
@@ -96,11 +107,10 @@
                                                (__v16qi)__M);
 }
 
-static __inline__  __m128i __attribute__((__always_inline__, __nodebug__))
-_mm_blend_epi16 (__m128i __V1, __m128i __V2, const int __M)
-{
-  return (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__V1, (__v8hi)__V2, __M);
-}
+#define _mm_blend_epi16(V1, V2, M) __extension__ ({ \
+  __m128i __V1 = (V1); \
+  __m128i __V2 = (V2); \
+  (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__V1, (__v8hi)__V2, (M)); })
 
 /* SSE4 Dword Multiply Instructions.  */
 static __inline__  __m128i __attribute__((__always_inline__, __nodebug__))
@@ -116,8 +126,15 @@
 }
 
 /* SSE4 Floating Point Dot Product Instructions.  */
-#define _mm_dp_ps(X, Y, M) __builtin_ia32_dpps ((X), (Y), (M))
-#define _mm_dp_pd(X, Y, M) __builtin_ia32_dppd ((X), (Y), (M))
+#define _mm_dp_ps(X, Y, M) __extension__ ({ \
+  __m128 __X = (X); \
+  __m128 __Y = (Y); \
+  (__m128) __builtin_ia32_dpps((__v4sf)__X, (__v4sf)__Y, (M)); })
+
+#define _mm_dp_pd(X, Y, M) __extension__ ({\
+  __m128d __X = (X); \
+  __m128d __Y = (Y); \
+  (__m128d) __builtin_ia32_dppd((__v2df)__X, (__v2df)__Y, (M)); })
 
 /* SSE4 Streaming Load Hint Instruction.  */
 static __inline__  __m128i __attribute__((__always_inline__, __nodebug__))
@@ -198,14 +215,14 @@
                                              
 /* Insert int into packed integer array at index.  */
 #define _mm_insert_epi8(X, I, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
-                                                   __a[N] = I;               \
+                                                   __a[(N)] = (I);             \
                                                    __a;}))
 #define _mm_insert_epi32(X, I, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
-                                                    __a[N] = I;             \
+                                                    __a[(N)] = (I);           \
                                                     __a;}))
 #ifdef __x86_64__
 #define _mm_insert_epi64(X, I, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
-                                                    __a[N] = I;             \
+                                                    __a[(N)] = (I);           \
                                                     __a;}))
 #endif /* __x86_64__ */
 
@@ -213,12 +230,12 @@
  * as a zero extended value, so it is unsigned.
  */
 #define _mm_extract_epi8(X, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
-                                                 (unsigned char)__a[N];}))
+                                                 (unsigned char)__a[(N)];}))
 #define _mm_extract_epi32(X, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
-                                                  (unsigned)__a[N];}))
+                                                  (unsigned)__a[(N)];}))
 #ifdef __x86_64__
 #define _mm_extract_epi64(X, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
-                                                  __a[N];}))
+                                                  __a[(N)];}))
 #endif /* __x86_64 */
 
 /* SSE4 128-bit Packed Integer Comparisons.  */
@@ -242,13 +259,13 @@
 
 #define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_cmpeq_epi32((V), (V)))
 #define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V))
-#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((V), (V))
+#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))
 
 /* SSE4 64-bit Packed Integer Comparisons.  */
 static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
 _mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
 {
-  return (__m128i) __builtin_ia32_pcmpeqq((__v2di)__V1, (__v2di)__V2);
+  return (__m128i)((__v2di)__V1 == (__v2di)__V2);
 }
 
 /* SSE4 Packed Integer Sign-Extension.  */
@@ -333,7 +350,16 @@
 }
 
 /* SSE4 Multiple Packed Sums of Absolute Difference.  */
-#define _mm_mpsadbw_epu8(X, Y, M) __builtin_ia32_mpsadbw128((X), (Y), (M))
+#define _mm_mpsadbw_epu8(X, Y, M) __extension__ ({ \
+  __m128i __X = (X); \
+  __m128i __Y = (Y); \
+  (__m128i) __builtin_ia32_mpsadbw128((__v16qi)__X, (__v16qi)__Y, (M)); })
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_minpos_epu16(__m128i __V)
+{
+  return (__m128i) __builtin_ia32_phminposuw128((__v8hi)__V);
+}
 
 /* These definitions are normally in nmmintrin.h, but gcc puts them in here
    so we'll do the same.  */
@@ -371,20 +397,20 @@
 
 #define _mm_cmpestrm(A, LA, B, LB, M) \
      __builtin_ia32_pcmpestrm128((A), (LA), (B), (LB), (M))
-#define _mm_cmpestri(X, LX, Y, LY, M) \
+#define _mm_cmpestri(A, LA, B, LB, M) \
      __builtin_ia32_pcmpestri128((A), (LA), (B), (LB), (M))
      
 /* SSE4.2 Packed Comparison Intrinsics and EFlag Reading.  */
-#define _mm_cmpistra(A, LA, B, LB, M) \
-     __builtin_ia32_pcmpistria128((A), (LA), (B), (LB), (M))
-#define _mm_cmpistrc(A, LA, B, LB, M) \
-     __builtin_ia32_pcmpistric128((A), (LA), (B), (LB), (M))
-#define _mm_cmpistro(A, LA, B, LB, M) \
-     __builtin_ia32_pcmpistrio128((A), (LA), (B), (LB), (M))
-#define _mm_cmpistrs(A, LA, B, LB, M) \
-     __builtin_ia32_pcmpistris128((A), (LA), (B), (LB), (M))
-#define _mm_cmpistrz(A, LA, B, LB, M) \
-     __builtin_ia32_pcmpistriz128((A), (LA), (B), (LB), (M))
+#define _mm_cmpistra(A, B, M) \
+     __builtin_ia32_pcmpistria128((A), (B), (M))
+#define _mm_cmpistrc(A, B, M) \
+     __builtin_ia32_pcmpistric128((A), (B), (M))
+#define _mm_cmpistro(A, B, M) \
+     __builtin_ia32_pcmpistrio128((A), (B), (M))
+#define _mm_cmpistrs(A, B, M) \
+     __builtin_ia32_pcmpistris128((A), (B), (M))
+#define _mm_cmpistrz(A, B, M) \
+     __builtin_ia32_pcmpistriz128((A), (B), (M))
 
 #define _mm_cmpestra(A, LA, B, LB, M) \
      __builtin_ia32_pcmpestria128((A), (LA), (B), (LB), (M))
@@ -401,7 +427,7 @@
 static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
 _mm_cmpgt_epi64(__m128i __V1, __m128i __V2)
 {
-  return __builtin_ia32_pcmpgtq((__v2di)__V1, (__v2di)__V2);
+  return (__m128i)((__v2di)__V1 > (__v2di)__V2);
 }
 
 /* SSE4.2 Accumulate CRC32.  */
@@ -431,20 +457,9 @@
 }
 #endif /* __x86_64__ */
 
-/* SSE4.2 Population Count.  */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
-_mm_popcnt_u32(unsigned int __A)
-{
-  return __builtin_popcount(__A);
-}
-
-#ifdef __x86_64__
-static __inline__ long long __attribute__((__always_inline__, __nodebug__))
-_mm_popcnt_u64(unsigned long long __A)
-{
-  return __builtin_popcountll(__A);
-}
-#endif /* __x86_64__ */
+#ifdef __POPCNT__
+#include <popcntintrin.h>
+#endif
 
 #endif /* __SSE4_2__ */
 #endif /* __SSE4_1__ */
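
The _mm_test_all_zeros change above is a real bug fix: the old body expanded to _mm_testz_si128((V), (V)), which ignored the mask and answered 1 only for an all-zero V. With (M), (V) it answers whether V has no bits set inside M, e.g. (assuming -msse4.1, names illustrative):

    #include <smmintrin.h>

    int test_all_zeros_demo(void)
    {
      __m128i mask = _mm_set1_epi8(0x0F);
      __m128i v    = _mm_set1_epi8(0x10);
      return _mm_test_all_zeros(mask, v);     /* 1: no bits of v fall inside mask */
    }
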
diff --git a/renderscript/clang-include/stddef.h b/renderscript/clang-include/stddef.h
index 9e87ee8..eb919b5 100644
--- a/renderscript/clang-include/stddef.h
+++ b/renderscript/clang-include/stddef.h
@@ -43,10 +43,20 @@
 
 #undef NULL
 #ifdef __cplusplus
-#undef __null  // VC++ hack.
-#define NULL __null
+#  if !defined(__MINGW32__) && !defined(_MSC_VER)
+#    define NULL __null
+#  else
+#    define NULL 0
+#  endif
 #else
-#define NULL ((void*)0)
+#  define NULL ((void*)0)
+#endif
+
+#ifdef __cplusplus
+#if defined(_MSC_EXTENSIONS) && defined(_NATIVE_NULLPTR_SUPPORTED)
+namespace std { typedef decltype(nullptr) nullptr_t; }
+using ::std::nullptr_t;
+#endif
 #endif
 
 #define offsetof(t, d) __builtin_offsetof(t, d)
diff --git a/renderscript/clang-include/tgmath.h b/renderscript/clang-include/tgmath.h
index 1b0b9d2..4fa1cf7 100644
--- a/renderscript/clang-include/tgmath.h
+++ b/renderscript/clang-include/tgmath.h
@@ -540,15 +540,15 @@
     _TG_ATTRS
     __tg_fabs(long double __x) {return fabsl(__x);}
 
-static float _Complex
+static float
     _TG_ATTRS
     __tg_fabs(float _Complex __x) {return cabsf(__x);}
 
-static double _Complex
+static double
     _TG_ATTRS
     __tg_fabs(double _Complex __x) {return cabs(__x);}
 
-static long double _Complex
+static long double
     _TG_ATTRS
     __tg_fabs(long double _Complex __x) {return cabsl(__x);}
 
@@ -976,6 +976,23 @@
 #undef log2
 #define log2(__x) __tg_log2(__tg_promote1((__x))(__x))
 
+// logb
+
+static float
+    _TG_ATTRS
+    __tg_logb(float __x) {return logbf(__x);}
+
+static double
+    _TG_ATTRS
+    __tg_logb(double __x) {return logb(__x);}
+
+static long double
+    _TG_ATTRS
+    __tg_logb(long double __x) {return logbl(__x);}
+
+#undef logb
+#define logb(__x) __tg_logb(__tg_promote1((__x))(__x))
+
 // lrint
 
 static long
diff --git a/renderscript/clang-include/tmmintrin.h b/renderscript/clang-include/tmmintrin.h
index 07fea1c..a62c6cc 100644
--- a/renderscript/clang-include/tmmintrin.h
+++ b/renderscript/clang-include/tmmintrin.h
@@ -66,8 +66,15 @@
     return (__m128i)__builtin_ia32_pabsd128((__v4si)a);
 }
 
-#define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n)))
-#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n)))
+#define _mm_alignr_epi8(a, b, n) __extension__ ({ \
+  __m128i __a = (a); \
+  __m128i __b = (b); \
+  (__m128i)__builtin_ia32_palignr128((__v16qi)__a, (__v16qi)__b, (n)); })
+
+#define _mm_alignr_pi8(a, b, n) __extension__ ({ \
+  __m64 __a = (a); \
+  __m64 __b = (b); \
+  (__m64)__builtin_ia32_palignr((__v8qi)__a, (__v8qi)__b, (n)); })
 
 static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
 _mm_hadd_epi16(__m128i a, __m128i b)
diff --git a/renderscript/clang-include/unwind.h b/renderscript/clang-include/unwind.h
new file mode 100644
index 0000000..a065920
--- /dev/null
+++ b/renderscript/clang-include/unwind.h
@@ -0,0 +1,124 @@
+/*===---- unwind.h - Stack unwinding ----------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* See "Data Definitions for libgcc_s" in the Linux Standard Base.*/
+
+#if __has_include_next(<unwind.h>)
+/* Darwin and libunwind provide an unwind.h. If that's available, use
+ * it. libunwind wraps some of its definitions in #ifdef _GNU_SOURCE,
+ * so define that around the include.*/
+# ifndef _GNU_SOURCE
+#  define _SHOULD_UNDEFINE_GNU_SOURCE
+#  define _GNU_SOURCE
+# endif
+// libunwind's unwind.h reflects the current visibility.  However, Mozilla
+// builds with -fvisibility=hidden and relies on gcc's unwind.h to reset the
+// visibility to default and export its contents.  gcc also allows users to
+// override its override by #defining HIDE_EXPORTS (but note, this only obeys
+// the user's -fvisibility setting; it doesn't hide any exports on its own).  We
+// imitate gcc's header here:
+# ifdef HIDE_EXPORTS
+#  include_next <unwind.h>
+# else
+#  pragma GCC visibility push(default)
+#  include_next <unwind.h>
+#  pragma GCC visibility pop
+# endif
+# ifdef _SHOULD_UNDEFINE_GNU_SOURCE
+#  undef _GNU_SOURCE
+#  undef _SHOULD_UNDEFINE_GNU_SOURCE
+# endif
+#else
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* It is a bit strange for a header to play with the visibility of the
+   symbols it declares, but this matches gcc's behavior and some programs
+   depend on it. */
+#pragma GCC visibility push(default)
+
+struct _Unwind_Context;
+typedef enum {
+  _URC_NO_REASON = 0,
+  _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
+
+  _URC_FATAL_PHASE2_ERROR = 2,
+  _URC_FATAL_PHASE1_ERROR = 3,
+  _URC_NORMAL_STOP = 4,
+
+  _URC_END_OF_STACK = 5,
+  _URC_HANDLER_FOUND = 6,
+  _URC_INSTALL_CONTEXT = 7,
+  _URC_CONTINUE_UNWIND = 8
+} _Unwind_Reason_Code;
+
+
+#ifdef __arm__
+
+typedef enum {
+  _UVRSC_CORE = 0,        /* integer register */
+  _UVRSC_VFP = 1,         /* vfp */
+  _UVRSC_WMMXD = 3,       /* Intel WMMX data register */
+  _UVRSC_WMMXC = 4        /* Intel WMMX control register */
+} _Unwind_VRS_RegClass;
+
+typedef enum {
+  _UVRSD_UINT32 = 0,
+  _UVRSD_VFPX = 1,
+  _UVRSD_UINT64 = 3,
+  _UVRSD_FLOAT = 4,
+  _UVRSD_DOUBLE = 5
+} _Unwind_VRS_DataRepresentation;
+
+typedef enum {
+  _UVRSR_OK = 0,
+  _UVRSR_NOT_IMPLEMENTED = 1,
+  _UVRSR_FAILED = 2
+} _Unwind_VRS_Result;
+
+_Unwind_VRS_Result _Unwind_VRS_Get(struct _Unwind_Context *context,
+  _Unwind_VRS_RegClass regclass,
+  uint32_t regno,
+  _Unwind_VRS_DataRepresentation representation,
+  void *valuep);
+
+#else
+
+uintptr_t _Unwind_GetIP(struct _Unwind_Context* context);
+
+#endif
+
+typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn)(struct _Unwind_Context*, void*);
+_Unwind_Reason_Code _Unwind_Backtrace(_Unwind_Trace_Fn, void*);
+
+#pragma GCC visibility pop
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
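
As a usage sketch, the portable subset declared above is enough to walk the stack; on non-ARM targets this header declares _Unwind_GetIP to recover each frame's pc (trace_cb and dump_stack are illustrative names):

  #include <unwind.h>
  #include <stdio.h>

  static _Unwind_Reason_Code trace_cb(struct _Unwind_Context *ctx, void *arg)
  {
      int *depth = arg;
      printf("#%d: pc=%p\n", (*depth)++, (void *)_Unwind_GetIP(ctx));
      return _URC_NO_REASON;   /* keep unwinding */
  }

  void dump_stack(void)
  {
      int depth = 0;
      _Unwind_Backtrace(trace_cb, &depth);
  }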
diff --git a/renderscript/clang-include/wmmintrin.h b/renderscript/clang-include/wmmintrin.h
index 6b2e468..dca896f 100644
--- a/renderscript/clang-include/wmmintrin.h
+++ b/renderscript/clang-include/wmmintrin.h
@@ -24,11 +24,13 @@
 #ifndef _WMMINTRIN_H
 #define _WMMINTRIN_H
 
-#if !defined (__AES__)
-# error "AES instructions not enabled"
+#include <emmintrin.h>
+
+#if !defined (__AES__) && !defined (__PCLMUL__)
+# error "AES/PCLMUL instructions not enabled"
 #else
 
-#include <smmintrin.h>
+#ifdef __AES__
 
 static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
 _mm_aesenc_si128(__m128i __V, __m128i __R)
@@ -64,4 +66,14 @@
   __builtin_ia32_aeskeygenassist128((C), (R))
 
 #endif /* __AES__ */
+
+#ifdef __PCLMUL__
+
+#define _mm_clmulepi64_si128(__X, __Y, __I) \
+  ((__m128i)__builtin_ia32_pclmulqdq128((__v2di)(__m128i)(__X), \
+                                        (__v2di)(__m128i)(__Y), (char)(__I)))
+
+#endif /* __PCLMUL__ */
+
+#endif /* __AES__ || __PCLMUL__ */
 #endif /* _WMMINTRIN_H */
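
With the header now gated per feature, a PCLMUL-only build (-mpclmul without -maes) gets _mm_clmulepi64_si128 and nothing else. A small sketch of the carry-less multiply, as used in CRC and GHASH kernels (clmul_lo is illustrative):

  #include <wmmintrin.h>   /* compile with -mpclmul */

  /* 64x64 -> 128-bit carry-less product of the low quadwords of a and b;
     the immediate selects which quadword of each operand participates. */
  static __m128i clmul_lo(__m128i a, __m128i b)
  {
      return _mm_clmulepi64_si128(a, b, 0x00);
  }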
diff --git a/renderscript/clang-include/x86intrin.h b/renderscript/clang-include/x86intrin.h
index e5e7a6a..556cd01 100644
--- a/renderscript/clang-include/x86intrin.h
+++ b/renderscript/clang-include/x86intrin.h
@@ -26,6 +26,38 @@
 
 #include <immintrin.h>
 
-// FIXME: SSE4A, 3dNOW, FMA4, XOP, LWP, ABM, POPCNT
+#ifdef __3dNOW__
+#include <mm3dnow.h>
+#endif
+
+#ifdef __BMI__
+#include <bmiintrin.h>
+#endif
+
+#ifdef __BMI2__
+#include <bmi2intrin.h>
+#endif
+
+#ifdef __LZCNT__
+#include <lzcntintrin.h>
+#endif
+
+#ifdef __POPCNT__
+#include <popcntintrin.h>
+#endif
+
+#ifdef __SSE4A__
+#include <ammintrin.h>
+#endif
+
+#ifdef __FMA4__
+#include <fma4intrin.h>
+#endif
+
+#ifdef __XOP__
+#include <xopintrin.h>
+#endif
+
+// FIXME: LWP
 
 #endif /* __X86INTRIN_H */
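
Because each sub-header is gated on its feature macro, user code can include <x86intrin.h> unconditionally and test the same macros itself. A hedged sketch (count_bits is illustrative):

  #include <x86intrin.h>

  static int count_bits(unsigned v)
  {
  #ifdef __POPCNT__
      return _mm_popcnt_u32(v);      /* from popcntintrin.h, pulled in above */
  #else
      return __builtin_popcount(v);  /* portable fallback */
  #endif
  }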
diff --git a/renderscript/clang-include/xmmintrin.h b/renderscript/clang-include/xmmintrin.h
index a0bc0bb..e616157 100644
--- a/renderscript/clang-include/xmmintrin.h
+++ b/renderscript/clang-include/xmmintrin.h
@@ -664,7 +664,7 @@
 /* FIXME: We have to #define this because "sel" must be a constant integer, and
    Sema doesn't do any form of constant propagation yet. */
 
-#define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), 0, sel))
+#define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), 0, (sel)))
 
 static __inline__ void __attribute__((__always_inline__, __nodebug__))
 _mm_stream_pi(__m64 *p, __m64 a)
@@ -735,8 +735,9 @@
   return (__m64)__builtin_ia32_pmulhuw((__v4hi)a, (__v4hi)b);  
 }
 
-#define _mm_shuffle_pi16(a, n) \
-  ((__m64)__builtin_ia32_pshufw(a, n))
+#define _mm_shuffle_pi16(a, n) __extension__ ({ \
+  __m64 __a = (a); \
+  (__m64)__builtin_ia32_pshufw((__v4hi)__a, (n)); })
 
 static __inline__ void __attribute__((__always_inline__, __nodebug__))
 _mm_maskmove_si64(__m64 d, __m64 n, char *p)
@@ -774,11 +775,13 @@
   __builtin_ia32_ldmxcsr(i);
 }
 
-#define _mm_shuffle_ps(a, b, mask) \
-        (__builtin_shufflevector((__v4sf)(a), (__v4sf)(b),                \
-                                 (mask) & 0x3, ((mask) & 0xc) >> 2, \
-                                 (((mask) & 0x30) >> 4) + 4, \
-                                 (((mask) & 0xc0) >> 6) + 4))
+#define _mm_shuffle_ps(a, b, mask) __extension__ ({ \
+  __m128 __a = (a); \
+  __m128 __b = (b); \
+  (__m128)__builtin_shufflevector((__v4sf)__a, (__v4sf)__b, \
+                                  (mask) & 0x3, ((mask) & 0xc) >> 2, \
+                                  (((mask) & 0x30) >> 4) + 4, \
+                                  (((mask) & 0xc0) >> 6) + 4); })
 
 static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
 _mm_unpackhi_ps(__m128 a, __m128 b)
@@ -935,7 +938,7 @@
 
 #define _MM_FLUSH_ZERO_MASK   (0x8000)
 #define _MM_FLUSH_ZERO_ON     (0x8000)
-#define _MM_FLUSH_ZERO_OFF    (0x8000)
+#define _MM_FLUSH_ZERO_OFF    (0x0000)
 
 #define _MM_GET_EXCEPTION_MASK() (_mm_getcsr() & _MM_MASK_MASK)
 #define _MM_GET_EXCEPTION_STATE() (_mm_getcsr() & _MM_EXCEPT_MASK)
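
The _MM_FLUSH_ZERO_OFF fix is behavioral: previously OFF and ON were both 0x8000, so flush-to-zero could never be cleared through the convenience macro. With the corrected value this now works as intended (disable_ftz is illustrative):

  #include <xmmintrin.h>

  void disable_ftz(void)
  {
      /* Clears the MXCSR FTZ bit; before the fix this call actually
         left (or set) FTZ on. */
      _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_OFF);
  }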
diff --git a/renderscript/clang-include/xopintrin.h b/renderscript/clang-include/xopintrin.h
new file mode 100644
index 0000000..d107be4
--- /dev/null
+++ b/renderscript/clang-include/xopintrin.h
@@ -0,0 +1,411 @@
+/*===---- xopintrin.h - XOP intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <fma4intrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __XOPINTRIN_H
+#define __XOPINTRIN_H
+
+#ifndef __XOP__
+# error "XOP instruction set is not enabled"
+#else
+
+#include <fma4intrin.h>
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maccs_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpmacssww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_macc_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpmacsww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maccsd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpmacsswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maccd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpmacswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maccs_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpmacssdd((__v4si)__A, (__v4si)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_macc_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpmacsdd((__v4si)__A, (__v4si)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maccslo_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpmacssdql((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_macclo_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpmacsdql((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maccshi_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpmacssdqh((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_macchi_epi32(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpmacsdqh((__v4si)__A, (__v4si)__B, (__v2di)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maddsd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpmadcsswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maddd_epi16(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpmadcswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_haddw_epi8(__m128i __A)
+{
+  return (__m128i)__builtin_ia32_vphaddbw((__v16qi)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_haddd_epi8(__m128i __A)
+{
+  return (__m128i)__builtin_ia32_vphaddbd((__v16qi)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_haddq_epi8(__m128i __A)
+{
+  return (__m128i)__builtin_ia32_vphaddbq((__v16qi)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_haddd_epi16(__m128i __A)
+{
+  return (__m128i)__builtin_ia32_vphaddwd((__v8hi)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_haddq_epi16(__m128i __A)
+{
+  return (__m128i)__builtin_ia32_vphaddwq((__v8hi)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_haddq_epi32(__m128i __A)
+{
+  return (__m128i)__builtin_ia32_vphadddq((__v4si)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_haddw_epu8(__m128i __A)
+{
+  return (__m128i)__builtin_ia32_vphaddubw((__v16qi)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_haddd_epu8(__m128i __A)
+{
+  return (__m128i)__builtin_ia32_vphaddubd((__v16qi)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_haddq_epu8(__m128i __A)
+{
+  return (__m128i)__builtin_ia32_vphaddubq((__v16qi)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_haddd_epu16(__m128i __A)
+{
+  return (__m128i)__builtin_ia32_vphadduwd((__v8hi)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_haddq_epu16(__m128i __A)
+{
+  return (__m128i)__builtin_ia32_vphadduwq((__v8hi)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_haddq_epu32(__m128i __A)
+{
+  return (__m128i)__builtin_ia32_vphaddudq((__v4si)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_hsubw_epi8(__m128i __A)
+{
+  return (__m128i)__builtin_ia32_vphsubbw((__v16qi)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_hsubd_epi16(__m128i __A)
+{
+  return (__m128i)__builtin_ia32_vphsubwd((__v8hi)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_hsubq_epi32(__m128i __A)
+{
+  return (__m128i)__builtin_ia32_vphsubdq((__v4si)__A);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cmov_si128(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpcmov(__A, __B, __C);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmov_si256(__m256i __A, __m256i __B, __m256i __C)
+{
+  return (__m256i)__builtin_ia32_vpcmov_256(__A, __B, __C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_perm_epi8(__m128i __A, __m128i __B, __m128i __C)
+{
+  return (__m128i)__builtin_ia32_vpperm((__v16qi)__A, (__v16qi)__B, (__v16qi)__C);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_rot_epi8(__m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_vprotb((__v16qi)__A, (__v16qi)__B);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_rot_epi16(__m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_vprotw((__v8hi)__A, (__v8hi)__B);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_rot_epi32(__m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_vprotd((__v4si)__A, (__v4si)__B);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_rot_epi64(__m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_vprotq((__v2di)__A, (__v2di)__B);
+}
+
+#define _mm_roti_epi8(A, N) __extension__ ({ \
+  __m128i __A = (A); \
+  (__m128i)__builtin_ia32_vprotbi((__v16qi)__A, (N)); })
+
+#define _mm_roti_epi16(A, N) __extension__ ({ \
+  __m128i __A = (A); \
+  (__m128i)__builtin_ia32_vprotwi((__v8hi)__A, (N)); })
+
+#define _mm_roti_epi32(A, N) __extension__ ({ \
+  __m128i __A = (A); \
+  (__m128i)__builtin_ia32_vprotdi((__v4si)__A, (N)); })
+
+#define _mm_roti_epi64(A, N) __extension__ ({ \
+  __m128i __A = (A); \
+  (__m128i)__builtin_ia32_vprotqi((__v2di)__A, (N)); })
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_shl_epi8(__m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_vpshlb((__v16qi)__A, (__v16qi)__B);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_shl_epi16(__m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_vpshlw((__v8hi)__A, (__v8hi)__B);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_shl_epi32(__m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_vpshld((__v4si)__A, (__v4si)__B);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_shl_epi64(__m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_vpshlq((__v2di)__A, (__v2di)__B);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sha_epi8(__m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_vpshab((__v16qi)__A, (__v16qi)__B);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sha_epi16(__m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_vpshaw((__v8hi)__A, (__v8hi)__B);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sha_epi32(__m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_vpshad((__v4si)__A, (__v4si)__B);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sha_epi64(__m128i __A, __m128i __B)
+{
+  return (__m128i)__builtin_ia32_vpshaq((__v2di)__A, (__v2di)__B);
+}
+
+#define _mm_com_epu8(A, B, N) __extension__ ({ \
+  __m128i __A = (A); \
+  __m128i __B = (B); \
+  (__m128i)__builtin_ia32_vpcomub((__v16qi)__A, (__v16qi)__B, (N)); })
+
+#define _mm_com_epu16(A, B, N) __extension__ ({ \
+  __m128i __A = (A); \
+  __m128i __B = (B); \
+  (__m128i)__builtin_ia32_vpcomuw((__v8hi)__A, (__v8hi)__B, (N)); })
+
+#define _mm_com_epu32(A, B, N) __extension__ ({ \
+  __m128i __A = (A); \
+  __m128i __B = (B); \
+  (__m128i)__builtin_ia32_vpcomud((__v4si)__A, (__v4si)__B, (N)); })
+
+#define _mm_com_epu64(A, B, N) __extension__ ({ \
+  __m128i __A = (A); \
+  __m128i __B = (B); \
+  (__m128i)__builtin_ia32_vpcomuq((__v2di)__A, (__v2di)__B, (N)); })
+
+#define _mm_com_epi8(A, B, N) __extension__ ({ \
+  __m128i __A = (A); \
+  __m128i __B = (B); \
+  (__m128i)__builtin_ia32_vpcomb((__v16qi)__A, (__v16qi)__B, (N)); })
+
+#define _mm_com_epi16(A, B, N) __extension__ ({ \
+  __m128i __A = (A); \
+  __m128i __B = (B); \
+  (__m128i)__builtin_ia32_vpcomw((__v8hi)__A, (__v8hi)__B, (N)); })
+
+#define _mm_com_epi32(A, B, N) __extension__ ({ \
+  __m128i __A = (A); \
+  __m128i __B = (B); \
+  (__m128i)__builtin_ia32_vpcomd((__v4si)__A, (__v4si)__B, (N)); })
+
+#define _mm_com_epi64(A, B, N) __extension__ ({ \
+  __m128i __A = (A); \
+  __m128i __B = (B); \
+  (__m128i)__builtin_ia32_vpcomq((__v2di)__A, (__v2di)__B, (N)); })
+
+#define _mm_permute2_pd(X, Y, C, I) __extension__ ({ \
+  __m128d __X = (X); \
+  __m128d __Y = (Y); \
+  __m128i __C = (C); \
+  (__m128d)__builtin_ia32_vpermil2pd((__v2df)__X, (__v2df)__Y, \
+                                     (__v2di)__C, (I)); })
+
+#define _mm256_permute2_pd(X, Y, C, I) __extension__ ({ \
+  __m256d __X = (X); \
+  __m256d __Y = (Y); \
+  __m256i __C = (C); \
+  (__m256d)__builtin_ia32_vpermil2pd256((__v4df)__X, (__v4df)__Y, \
+                                        (__v4di)__C, (I)); })
+
+#define _mm_permute2_ps(X, Y, C, I) __extension__ ({ \
+  __m128 __X = (X); \
+  __m128 __Y = (Y); \
+  __m128i __C = (C); \
+  (__m128)__builtin_ia32_vpermil2ps((__v4sf)__X, (__v4sf)__Y, \
+                                    (__v4si)__C, (I)); })
+
+#define _mm256_permute2_ps(X, Y, C, I) __extension__ ({ \
+  __m256 __X = (X); \
+  __m256 __Y = (Y); \
+  __m256i __C = (C); \
+  (__m256)__builtin_ia32_vpermil2ps256((__v8sf)__X, (__v8sf)__Y, \
+                                       (__v8si)__C, (I)); })
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_frcz_ss(__m128 __A)
+{
+  return (__m128)__builtin_ia32_vfrczss((__v4sf)__A);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_frcz_sd(__m128d __A)
+{
+  return (__m128d)__builtin_ia32_vfrczsd((__v2df)__A);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_frcz_ps(__m128 __A)
+{
+  return (__m128)__builtin_ia32_vfrczps((__v4sf)__A);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_frcz_pd(__m128d __A)
+{
+  return (__m128d)__builtin_ia32_vfrczpd((__v2df)__A);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_frcz_ps(__m256 __A)
+{
+  return (__m256)__builtin_ia32_vfrczps256((__v8sf)__A);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_frcz_pd(__m256d __A)
+{
+  return (__m256d)__builtin_ia32_vfrczpd256((__v4df)__A);
+}
+
+#endif /* __XOP__ */
+
+#endif /* __XOPINTRIN_H */
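
A short XOP usage sketch (rotl8_epi32 is illustrative; compile with -mxop); the immediate-rotate macros follow the same evaluate-once statement-expression pattern used elsewhere in this change:

  #include <x86intrin.h>

  /* Rotate each 32-bit lane left by 8 bits using the immediate form. */
  static __m128i rotl8_epi32(__m128i v)
  {
      return _mm_roti_epi32(v, 8);
  }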
diff --git a/renderscript/include/rs_allocation.rsh b/renderscript/include/rs_allocation.rsh
index 154a099..46ff3cf 100644
--- a/renderscript/include/rs_allocation.rsh
+++ b/renderscript/include/rs_allocation.rsh
@@ -27,6 +27,9 @@
  * Returns the Allocation for a given pointer.  The pointer should point within
  * a valid allocation.  The results are undefined if the pointer is not from a
  * valid allocation.
+ *
+ * This function is deprecated and will be removed from the SDK in a future
+ * release.
  */
 extern rs_allocation __attribute__((overloadable))
     rsGetAllocation(const void *);
@@ -143,5 +146,207 @@
 extern const void * __attribute__((overloadable))
     rsGetElementAt(rs_allocation, uint32_t x, uint32_t y, uint32_t z);
 
+
+#define GET_ELEMENT_AT(T) \
+static inline T __attribute__((overloadable)) \
+        rsGetElementAt_##T(rs_allocation a, uint32_t x) {  \
+    return ((T *)rsGetElementAt(a, x))[0]; \
+} \
+static inline T __attribute__((overloadable)) \
+        rsGetElementAt_##T(rs_allocation a, uint32_t x, uint32_t y) {  \
+    return ((T *)rsGetElementAt(a, x, y))[0]; \
+} \
+static inline T __attribute__((overloadable)) \
+        rsGetElementAt_##T(rs_allocation a, uint32_t x, uint32_t y, uint32_t z) {  \
+    return ((T *)rsGetElementAt(a, x, y, z))[0]; \
+}
+
+GET_ELEMENT_AT(char)
+GET_ELEMENT_AT(char2)
+GET_ELEMENT_AT(char3)
+GET_ELEMENT_AT(char4)
+GET_ELEMENT_AT(uchar)
+GET_ELEMENT_AT(uchar2)
+GET_ELEMENT_AT(uchar3)
+GET_ELEMENT_AT(uchar4)
+GET_ELEMENT_AT(short)
+GET_ELEMENT_AT(short2)
+GET_ELEMENT_AT(short3)
+GET_ELEMENT_AT(short4)
+GET_ELEMENT_AT(ushort)
+GET_ELEMENT_AT(ushort2)
+GET_ELEMENT_AT(ushort3)
+GET_ELEMENT_AT(ushort4)
+GET_ELEMENT_AT(int)
+GET_ELEMENT_AT(int2)
+GET_ELEMENT_AT(int3)
+GET_ELEMENT_AT(int4)
+GET_ELEMENT_AT(uint)
+GET_ELEMENT_AT(uint2)
+GET_ELEMENT_AT(uint3)
+GET_ELEMENT_AT(uint4)
+GET_ELEMENT_AT(long)
+GET_ELEMENT_AT(long2)
+GET_ELEMENT_AT(long3)
+GET_ELEMENT_AT(long4)
+GET_ELEMENT_AT(ulong)
+GET_ELEMENT_AT(ulong2)
+GET_ELEMENT_AT(ulong3)
+GET_ELEMENT_AT(ulong4)
+GET_ELEMENT_AT(float)
+GET_ELEMENT_AT(float2)
+GET_ELEMENT_AT(float3)
+GET_ELEMENT_AT(float4)
+GET_ELEMENT_AT(double)
+GET_ELEMENT_AT(double2)
+GET_ELEMENT_AT(double3)
+GET_ELEMENT_AT(double4)
+
+#undef GET_ELEMENT_AT
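+
+/* Example (illustrative sketch; gIn and the kernel are hypothetical):
+ *
+ *     rs_allocation gIn;
+ *
+ *     void root(float *out, uint32_t x) {
+ *         float v  = rsGetElementAt_float(gIn, x);
+ *         float vn = rsGetElementAt_float(gIn, x + 1); // caller guards the edge
+ *         *out = 0.5f * (v + vn);
+ *     }
+ */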
+
+// Jelly Bean
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+
+/**
+ * Send the contents of the Allocation to the queue.
+ * @param a allocation to work on
+ */
+extern const void __attribute__((overloadable))
+    rsAllocationIoSend(rs_allocation a);
+
+/**
+ * Receive a new set of contents from the queue.
+ * @param a allocation to work on
+ */
+extern const void __attribute__((overloadable))
+    rsAllocationIoReceive(rs_allocation a);
+
+
+/**
+ * Get the element object describing the allocation's layout
+ * @param a allocation to get data from
+ * @return element describing allocation layout
+ */
+extern rs_element __attribute__((overloadable))
+    rsAllocationGetElement(rs_allocation a);
+
+/**
+ * Fetch allocation in a way described by the sampler
+ * @param a 1D allocation to sample from
+ * @param s sampler state
+ * @param location to sample from
+ */
+extern const float4 __attribute__((overloadable))
+    rsSample(rs_allocation a, rs_sampler s, float location);
+/**
+ * Fetch allocation in a way described by the sampler
+ * @param a 1D allocation to sample from
+ * @param s sampler state
+ * @param location to sample from
+ * @param lod mip level to sample from; for fractional values
+ *            mip levels will be interpolated if
+ *            RS_SAMPLER_LINEAR_MIP_LINEAR is used
+ */
+extern const float4 __attribute__((overloadable))
+    rsSample(rs_allocation a, rs_sampler s, float location, float lod);
+
+/**
+ * Fetch allocation in a way described by the sampler
+ * @param a 2D allocation to sample from
+ * @param s sampler state
+ * @param location to sample from
+ */
+extern const float4 __attribute__((overloadable))
+    rsSample(rs_allocation a, rs_sampler s, float2 location);
+
+/**
+ * Fetch allocation in a way described by the sampler
+ * @param a 2D allocation to sample from
+ * @param s sampler state
+ * @param location to sample from
+ * @param lod mip level to sample from; for fractional values
+ *            mip levels will be interpolated if
+ *            RS_SAMPLER_LINEAR_MIP_LINEAR is used
+ */
+extern const float4 __attribute__((overloadable))
+    rsSample(rs_allocation a, rs_sampler s, float2 location, float lod);
+
+#endif // (defined(RS_VERSION) && (RS_VERSION >= 16))
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+
+/**
+ * Set single element of an allocation.
+ */
+extern void __attribute__((overloadable))
+    rsSetElementAt(rs_allocation a, void* ptr, uint32_t x);
+
+/**
+ * \overload
+ */
+extern void __attribute__((overloadable))
+    rsSetElementAt(rs_allocation a, void* ptr, uint32_t x, uint32_t y);
+
+#define SET_ELEMENT_AT(T)                                               \
+    extern void __attribute__((overloadable))                           \
+    __rsSetElementAt_##T(rs_allocation a, T val, uint32_t x);           \
+    extern void __attribute__((overloadable))                           \
+    __rsSetElementAt_##T(rs_allocation a, T val, uint32_t x, uint32_t y); \
+                                                                        \
+    static inline void __attribute__((overloadable))                    \
+    rsSetElementAt_##T(rs_allocation a, T val, uint32_t x) {            \
+        __rsSetElementAt_##T(a, val, x);                                \
+    }                                                                   \
+    static inline void __attribute__((overloadable))                    \
+    rsSetElementAt_##T(rs_allocation a, T val, uint32_t x, uint32_t y) { \
+        __rsSetElementAt_##T(a, val, x, y);                     \
+    }
+
+SET_ELEMENT_AT(char)
+SET_ELEMENT_AT(char2)
+SET_ELEMENT_AT(char3)
+SET_ELEMENT_AT(char4)
+SET_ELEMENT_AT(uchar)
+SET_ELEMENT_AT(uchar2)
+SET_ELEMENT_AT(uchar3)
+SET_ELEMENT_AT(uchar4)
+SET_ELEMENT_AT(short)
+SET_ELEMENT_AT(short2)
+SET_ELEMENT_AT(short3)
+SET_ELEMENT_AT(short4)
+SET_ELEMENT_AT(ushort)
+SET_ELEMENT_AT(ushort2)
+SET_ELEMENT_AT(ushort3)
+SET_ELEMENT_AT(ushort4)
+SET_ELEMENT_AT(int)
+SET_ELEMENT_AT(int2)
+SET_ELEMENT_AT(int3)
+SET_ELEMENT_AT(int4)
+SET_ELEMENT_AT(uint)
+SET_ELEMENT_AT(uint2)
+SET_ELEMENT_AT(uint3)
+SET_ELEMENT_AT(uint4)
+SET_ELEMENT_AT(long)
+SET_ELEMENT_AT(long2)
+SET_ELEMENT_AT(long3)
+SET_ELEMENT_AT(long4)
+SET_ELEMENT_AT(ulong)
+SET_ELEMENT_AT(ulong2)
+SET_ELEMENT_AT(ulong3)
+SET_ELEMENT_AT(ulong4)
+SET_ELEMENT_AT(float)
+SET_ELEMENT_AT(float2)
+SET_ELEMENT_AT(float3)
+SET_ELEMENT_AT(float4)
+SET_ELEMENT_AT(double)
+SET_ELEMENT_AT(double2)
+SET_ELEMENT_AT(double3)
+SET_ELEMENT_AT(double4)
+
+#undef SET_ELEMENT_AT
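+
+/* Example (illustrative sketch; gOut and clear_row are hypothetical):
+ *
+ *     rs_allocation gOut;  // element type assumed float4
+ *
+ *     void clear_row(uint32_t y, uint32_t width) {
+ *         float4 zero = {0.f, 0.f, 0.f, 0.f};
+ *         for (uint32_t x = 0; x < width; x++) {
+ *             rsSetElementAt_float4(gOut, zero, x, y);
+ *         }
+ *     }
+ */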
+
+
+#endif // (defined(RS_VERSION) && (RS_VERSION >= 18))
+
 #endif
 
diff --git a/renderscript/include/rs_atomic.rsh b/renderscript/include/rs_atomic.rsh
index 87c6c02..a455edd 100644
--- a/renderscript/include/rs_atomic.rsh
+++ b/renderscript/include/rs_atomic.rsh
@@ -242,7 +242,7 @@
  * @return old value
  */
 extern uint32_t __attribute__((overloadable))
-    rsAtomicCas(volatile uint32_t* addr, int32_t compareValue, int32_t newValue);
+    rsAtomicCas(volatile uint32_t* addr, uint32_t compareValue, uint32_t newValue);
 
 #endif //defined(RS_VERSION) && (RS_VERSION >= 14)
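
The signature fix makes the unsigned overload self-consistent: compareValue and newValue now match the uint32_t pointee. Since rsAtomicCas returns the old value at addr, it supports the usual retry loop; a sketch (saturating_increment is illustrative):

  static void saturating_increment(volatile uint32_t *addr, uint32_t max) {
      uint32_t old;
      do {
          old = *addr;
          if (old >= max) return;             // already saturated
      } while (rsAtomicCas(addr, old, old + 1) != old);
  }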
 
diff --git a/renderscript/include/rs_cl.rsh b/renderscript/include/rs_cl.rsh
index bbc8fc5..211f53b 100644
--- a/renderscript/include/rs_cl.rsh
+++ b/renderscript/include/rs_cl.rsh
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2011-2012 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -25,11 +25,11 @@
 
 // Conversions
 #define CVT_FUNC_2(typeout, typein)                             \
-_RS_RUNTIME typeout##2 __attribute__((overloadable))             \
+_RS_RUNTIME typeout##2 __attribute__((overloadable))            \
         convert_##typeout##2(typein##2 v);                      \
-_RS_RUNTIME typeout##3 __attribute__((overloadable))             \
+_RS_RUNTIME typeout##3 __attribute__((overloadable))            \
         convert_##typeout##3(typein##3 v);                      \
-_RS_RUNTIME typeout##4 __attribute__((overloadable))             \
+_RS_RUNTIME typeout##4 __attribute__((overloadable))            \
         convert_##typeout##4(typein##4 v);
 
 
@@ -41,73 +41,140 @@
                         CVT_FUNC_2(type, int)       \
                         CVT_FUNC_2(type, float)
 
+/**
+ * Convert to char.
+ *
+ * Supports 2,3,4 components of uchar, char, ushort, short, uint, int, float.
+ */
 CVT_FUNC(char)
+
+/**
+ * Convert to unsigned char.
+ *
+ * Supports 2,3,4 components of uchar, char, ushort, short, uint, int, float.
+ */
 CVT_FUNC(uchar)
+
+/**
+ * Convert to short.
+ *
+ * Supports 2,3,4 components of uchar, char, ushort, short, uint, int, float.
+ */
 CVT_FUNC(short)
+
+/**
+ * Convert to unsigned short.
+ *
+ * Supports 2,3,4 components of uchar, char, ushort, short, uint, int, float.
+ */
 CVT_FUNC(ushort)
+
+/**
+ * Convert to int.
+ *
+ * Supports 2,3,4 components of uchar, char, ushort, short, uint, int, float.
+ */
 CVT_FUNC(int)
+
+/**
+ * Convert to unsigned int.
+ *
+ * Supports 2,3,4 components of uchar, char, ushort, short, uint, int, float.
+ */
 CVT_FUNC(uint)
+
+/**
+ * Convert to float.
+ *
+ * Supports 2,3,4 components of uchar, char, ushort, short, uint, int, float.
+ */
 CVT_FUNC(float)
 
 // Float ops, 6.11.2
 
 #define FN_FUNC_FN(fnc)                                         \
-_RS_RUNTIME float2 __attribute__((overloadable)) fnc(float2 v);  \
-_RS_RUNTIME float3 __attribute__((overloadable)) fnc(float3 v);  \
+_RS_RUNTIME float2 __attribute__((overloadable)) fnc(float2 v); \
+_RS_RUNTIME float3 __attribute__((overloadable)) fnc(float3 v); \
 _RS_RUNTIME float4 __attribute__((overloadable)) fnc(float4 v);
 
+#define F_FUNC_FN(fnc)                                          \
+_RS_RUNTIME float __attribute__((overloadable)) fnc(float2 v);  \
+_RS_RUNTIME float __attribute__((overloadable)) fnc(float3 v);  \
+_RS_RUNTIME float __attribute__((overloadable)) fnc(float4 v);
+
 #define IN_FUNC_FN(fnc)                                         \
-_RS_RUNTIME int2 __attribute__((overloadable)) fnc(float2 v);    \
-_RS_RUNTIME int3 __attribute__((overloadable)) fnc(float3 v);    \
+_RS_RUNTIME int2 __attribute__((overloadable)) fnc(float2 v);   \
+_RS_RUNTIME int3 __attribute__((overloadable)) fnc(float3 v);   \
 _RS_RUNTIME int4 __attribute__((overloadable)) fnc(float4 v);
 
 #define FN_FUNC_FN_FN(fnc)                                                  \
-_RS_RUNTIME float2 __attribute__((overloadable)) fnc(float2 v1, float2 v2);  \
-_RS_RUNTIME float3 __attribute__((overloadable)) fnc(float3 v1, float3 v2);  \
+_RS_RUNTIME float2 __attribute__((overloadable)) fnc(float2 v1, float2 v2); \
+_RS_RUNTIME float3 __attribute__((overloadable)) fnc(float3 v1, float3 v2); \
 _RS_RUNTIME float4 __attribute__((overloadable)) fnc(float4 v1, float4 v2);
 
+#define F_FUNC_FN_FN(fnc)                                                   \
+_RS_RUNTIME float __attribute__((overloadable)) fnc(float2 v1, float2 v2);  \
+_RS_RUNTIME float __attribute__((overloadable)) fnc(float3 v1, float3 v2);  \
+_RS_RUNTIME float __attribute__((overloadable)) fnc(float4 v1, float4 v2);
+
 #define FN_FUNC_FN_F(fnc)                                                   \
-_RS_RUNTIME float2 __attribute__((overloadable)) fnc(float2 v1, float v2);   \
-_RS_RUNTIME float3 __attribute__((overloadable)) fnc(float3 v1, float v2);   \
+_RS_RUNTIME float2 __attribute__((overloadable)) fnc(float2 v1, float v2);  \
+_RS_RUNTIME float3 __attribute__((overloadable)) fnc(float3 v1, float v2);  \
 _RS_RUNTIME float4 __attribute__((overloadable)) fnc(float4 v1, float v2);
 
 #define FN_FUNC_FN_IN(fnc)                                                  \
-_RS_RUNTIME float2 __attribute__((overloadable)) fnc(float2 v1, int2 v2);    \
-_RS_RUNTIME float3 __attribute__((overloadable)) fnc(float3 v1, int3 v2);    \
-_RS_RUNTIME float4 __attribute__((overloadable)) fnc(float4 v1, int4 v2);    \
+_RS_RUNTIME float2 __attribute__((overloadable)) fnc(float2 v1, int2 v2);   \
+_RS_RUNTIME float3 __attribute__((overloadable)) fnc(float3 v1, int3 v2);   \
+_RS_RUNTIME float4 __attribute__((overloadable)) fnc(float4 v1, int4 v2);
 
 #define FN_FUNC_FN_I(fnc)                                                   \
-_RS_RUNTIME float2 __attribute__((overloadable)) fnc(float2 v1, int v2);     \
-_RS_RUNTIME float3 __attribute__((overloadable)) fnc(float3 v1, int v2);     \
+_RS_RUNTIME float2 __attribute__((overloadable)) fnc(float2 v1, int v2);    \
+_RS_RUNTIME float3 __attribute__((overloadable)) fnc(float3 v1, int v2);    \
 _RS_RUNTIME float4 __attribute__((overloadable)) fnc(float4 v1, int v2);
 
-#define FN_FUNC_FN_PFN(fnc)                     \
-_RS_RUNTIME float2 __attribute__((overloadable)) \
-        fnc(float2 v1, float2 *v2);             \
-_RS_RUNTIME float3 __attribute__((overloadable)) \
-        fnc(float3 v1, float3 *v2);             \
-_RS_RUNTIME float4 __attribute__((overloadable)) \
+#define FN_FUNC_FN_PFN(fnc)                         \
+_RS_RUNTIME float2 __attribute__((overloadable))    \
+        fnc(float2 v1, float2 *v2);                 \
+_RS_RUNTIME float3 __attribute__((overloadable))    \
+        fnc(float3 v1, float3 *v2);                 \
+_RS_RUNTIME float4 __attribute__((overloadable))    \
         fnc(float4 v1, float4 *v2);
 
 #define FN_FUNC_FN_PIN(fnc)                                                 \
-_RS_RUNTIME float2 __attribute__((overloadable)) fnc(float2 v1, int2 *v2);   \
-_RS_RUNTIME float3 __attribute__((overloadable)) fnc(float3 v1, int3 *v2);   \
+_RS_RUNTIME float2 __attribute__((overloadable)) fnc(float2 v1, int2 *v2);  \
+_RS_RUNTIME float3 __attribute__((overloadable)) fnc(float3 v1, int3 *v2);  \
 _RS_RUNTIME float4 __attribute__((overloadable)) fnc(float4 v1, int4 *v2);
 
-#define FN_FUNC_FN_FN_FN(fnc)                   \
-_RS_RUNTIME float2 __attribute__((overloadable)) \
-        fnc(float2 v1, float2 v2, float2 v3);   \
-_RS_RUNTIME float3 __attribute__((overloadable)) \
-        fnc(float3 v1, float3 v2, float3 v3);   \
-_RS_RUNTIME float4 __attribute__((overloadable)) \
+#define FN_FUNC_FN_FN_FN(fnc)                       \
+_RS_RUNTIME float2 __attribute__((overloadable))    \
+        fnc(float2 v1, float2 v2, float2 v3);       \
+_RS_RUNTIME float3 __attribute__((overloadable))    \
+        fnc(float3 v1, float3 v2, float3 v3);       \
+_RS_RUNTIME float4 __attribute__((overloadable))    \
         fnc(float4 v1, float4 v2, float4 v3);
 
-#define FN_FUNC_FN_FN_PIN(fnc)                  \
-_RS_RUNTIME float2 __attribute__((overloadable)) \
-        fnc(float2 v1, float2 v2, int2 *v3);    \
-_RS_RUNTIME float3 __attribute__((overloadable)) \
-        fnc(float3 v1, float3 v2, int3 *v3);    \
-_RS_RUNTIME float4 __attribute__((overloadable)) \
+#define FN_FUNC_FN_FN_F(fnc)                        \
+_RS_RUNTIME float2 __attribute__((overloadable))    \
+        fnc(float2 v1, float2 v2, float v3);        \
+_RS_RUNTIME float3 __attribute__((overloadable))    \
+        fnc(float3 v1, float3 v2, float v3);        \
+_RS_RUNTIME float4 __attribute__((overloadable))    \
+        fnc(float4 v1, float4 v2, float v3);
+
+#define FN_FUNC_FN_F_F(fnc)                         \
+_RS_RUNTIME float2 __attribute__((overloadable))    \
+        fnc(float2 v1, float v2, float v3);         \
+_RS_RUNTIME float3 __attribute__((overloadable))    \
+        fnc(float3 v1, float v2, float v3);         \
+_RS_RUNTIME float4 __attribute__((overloadable))    \
+        fnc(float4 v1, float v2, float v3);
+
+#define FN_FUNC_FN_FN_PIN(fnc)                      \
+_RS_RUNTIME float2 __attribute__((overloadable))    \
+        fnc(float2 v1, float2 v2, int2 *v3);        \
+_RS_RUNTIME float3 __attribute__((overloadable))    \
+        fnc(float3 v1, float3 v2, int3 *v3);        \
+_RS_RUNTIME float4 __attribute__((overloadable))    \
         fnc(float4 v1, float4 v2, int4 *v3);
 
 
@@ -380,6 +447,13 @@
 extern float __attribute__((overloadable)) fmod(float x, float y);
 FN_FUNC_FN_FN(fmod)
 
+/**
+ * Return fractional part of v
+ *
+ * Supports float, float2, float3, float4.
+ */
+_RS_RUNTIME float __attribute__((overloadable)) fract(float v);
+FN_FUNC_FN(fract)
 
 /**
  * Return fractional part of v
@@ -491,7 +565,7 @@
 FN_FUNC_FN_FN_FN(mad)
 
 /**
- * Return the integral and fractional components of a number
+ * Return the integral and fractional components of a number.
  * Supports 1,2,3,4 components
  *
  * @param x Source value
@@ -501,7 +575,7 @@
 extern float __attribute__((overloadable)) modf(float x, float *iret);
 FN_FUNC_FN_PFN(modf);
 
-//extern float __attribute__((overloadable)) nan(uint);
+extern float __attribute__((overloadable)) nan(uint);
 
 /**
  * Return the next floating point number from x towards y.
@@ -575,7 +649,6 @@
 /**
  * Return (1 / sqrt(value)).
  *
- * @param v The incoming value in radians
  * Supports 1,2,3,4 components
  */
 _RS_RUNTIME float __attribute__((overloadable)) rsqrt(float v);
@@ -663,8 +736,8 @@
 
 #define XN_FUNC_YN(typeout, fnc, typein)                                \
 extern typeout __attribute__((overloadable)) fnc(typein);               \
-_RS_RUNTIME typeout##2 __attribute__((overloadable)) fnc(typein##2 v);   \
-_RS_RUNTIME typeout##3 __attribute__((overloadable)) fnc(typein##3 v);   \
+_RS_RUNTIME typeout##2 __attribute__((overloadable)) fnc(typein##2 v);  \
+_RS_RUNTIME typeout##3 __attribute__((overloadable)) fnc(typein##3 v);  \
 _RS_RUNTIME typeout##4 __attribute__((overloadable)) fnc(typein##4 v);
 
 #define UIN_FUNC_IN(fnc)          \
@@ -682,25 +755,36 @@
 
 
 #define XN_FUNC_XN_XN_BODY(type, fnc, body)         \
-_RS_RUNTIME type __attribute__((overloadable))       \
+_RS_RUNTIME type __attribute__((overloadable))      \
         fnc(type v1, type v2);                      \
-_RS_RUNTIME type##2 __attribute__((overloadable))    \
+_RS_RUNTIME type##2 __attribute__((overloadable))   \
         fnc(type##2 v1, type##2 v2);                \
-_RS_RUNTIME type##3 __attribute__((overloadable))    \
+_RS_RUNTIME type##3 __attribute__((overloadable))   \
         fnc(type##3 v1, type##3 v2);                \
-_RS_RUNTIME type##4 __attribute__((overloadable))    \
+_RS_RUNTIME type##4 __attribute__((overloadable))   \
         fnc(type##4 v1, type##4 v2);
 
-#define IN_FUNC_IN_IN_BODY(fnc, body) \
-XN_FUNC_XN_XN_BODY(uchar, fnc, body)  \
-XN_FUNC_XN_XN_BODY(char, fnc, body)   \
-XN_FUNC_XN_XN_BODY(ushort, fnc, body) \
-XN_FUNC_XN_XN_BODY(short, fnc, body)  \
-XN_FUNC_XN_XN_BODY(uint, fnc, body)   \
-XN_FUNC_XN_XN_BODY(int, fnc, body)    \
+#define IN_FUNC_IN_IN_BODY(fnc, body)   \
+XN_FUNC_XN_XN_BODY(uchar, fnc, body)    \
+XN_FUNC_XN_XN_BODY(char, fnc, body)     \
+XN_FUNC_XN_XN_BODY(ushort, fnc, body)   \
+XN_FUNC_XN_XN_BODY(short, fnc, body)    \
+XN_FUNC_XN_XN_BODY(uint, fnc, body)     \
+XN_FUNC_XN_XN_BODY(int, fnc, body)      \
 XN_FUNC_XN_XN_BODY(float, fnc, body)
 
+/**
+ * Return the absolute value of a value.
+ *
+ * Supports 1,2,3,4 components of char, short, int.
+ */
 UIN_FUNC_IN(abs)
+
+/**
+ * Return the number of leading 0-bits in a value.
+ *
+ * Supports 1,2,3,4 components of uchar, char, ushort, short, uint, int.
+ */
 IN_FUNC_IN(clz)
 
 /**
@@ -727,12 +811,8 @@
  * @param high High bound, must match type of low
  */
 _RS_RUNTIME float __attribute__((overloadable)) clamp(float amount, float low, float high);
-_RS_RUNTIME float2 __attribute__((overloadable)) clamp(float2 amount, float2 low, float2 high);
-_RS_RUNTIME float3 __attribute__((overloadable)) clamp(float3 amount, float3 low, float3 high);
-_RS_RUNTIME float4 __attribute__((overloadable)) clamp(float4 amount, float4 low, float4 high);
-_RS_RUNTIME float2 __attribute__((overloadable)) clamp(float2 amount, float low, float high);
-_RS_RUNTIME float3 __attribute__((overloadable)) clamp(float3 amount, float low, float high);
-_RS_RUNTIME float4 __attribute__((overloadable)) clamp(float4 amount, float low, float high);
+FN_FUNC_FN_FN_FN(clamp)
+FN_FUNC_FN_F_F(clamp)
 
 /**
  * Convert from radians to degrees.
@@ -748,12 +828,8 @@
  * Supports 1,2,3,4 components
  */
 _RS_RUNTIME float __attribute__((overloadable)) mix(float start, float stop, float amount);
-_RS_RUNTIME float2 __attribute__((overloadable)) mix(float2 start, float2 stop, float2 amount);
-_RS_RUNTIME float3 __attribute__((overloadable)) mix(float3 start, float3 stop, float3 amount);
-_RS_RUNTIME float4 __attribute__((overloadable)) mix(float4 start, float4 stop, float4 amount);
-_RS_RUNTIME float2 __attribute__((overloadable)) mix(float2 start, float2 stop, float amount);
-_RS_RUNTIME float3 __attribute__((overloadable)) mix(float3 start, float3 stop, float amount);
-_RS_RUNTIME float4 __attribute__((overloadable)) mix(float4 start, float4 stop, float amount);
+FN_FUNC_FN_FN_FN(mix)
+FN_FUNC_FN_FN_F(mix)
 
 /**
  * Convert from degrees to radians.
@@ -772,12 +848,8 @@
  * Supports 1,2,3,4 components
  */
 _RS_RUNTIME float __attribute__((overloadable)) step(float edge, float v);
-_RS_RUNTIME float2 __attribute__((overloadable)) step(float2 edge, float2 v);
-_RS_RUNTIME float3 __attribute__((overloadable)) step(float3 edge, float3 v);
-_RS_RUNTIME float4 __attribute__((overloadable)) step(float4 edge, float4 v);
-_RS_RUNTIME float2 __attribute__((overloadable)) step(float2 edge, float v);
-_RS_RUNTIME float3 __attribute__((overloadable)) step(float3 edge, float v);
-_RS_RUNTIME float4 __attribute__((overloadable)) step(float4 edge, float v);
+FN_FUNC_FN_FN(step)
+FN_FUNC_FN_F(step)
 
 // not implemented
 extern float __attribute__((overloadable)) smoothstep(float, float, float);
@@ -789,6 +861,8 @@
 extern float4 __attribute__((overloadable)) smoothstep(float, float, float4);
 
 /**
+ * Return the sign of a value.
+ *
  * if (v < 0) return -1.f;
  * else if (v > 0) return 1.f;
  * else return 0.f;
@@ -812,9 +886,7 @@
  * Supports 1,2,3,4 components
  */
 _RS_RUNTIME float __attribute__((overloadable)) dot(float lhs, float rhs);
-_RS_RUNTIME float __attribute__((overloadable)) dot(float2 lhs, float2 rhs);
-_RS_RUNTIME float __attribute__((overloadable)) dot(float3 lhs, float3 rhs);
-_RS_RUNTIME float __attribute__((overloadable)) dot(float4 lhs, float4 rhs);
+F_FUNC_FN_FN(dot)
 
 /**
  * Compute the length of a vector.
@@ -822,9 +894,7 @@
  * Supports 1,2,3,4 components
  */
 _RS_RUNTIME float __attribute__((overloadable)) length(float v);
-_RS_RUNTIME float __attribute__((overloadable)) length(float2 v);
-_RS_RUNTIME float __attribute__((overloadable)) length(float3 v);
-_RS_RUNTIME float __attribute__((overloadable)) length(float4 v);
+F_FUNC_FN(length)
 
 /**
  * Compute the distance between two points.
@@ -832,9 +902,7 @@
  * Supports 1,2,3,4 components
  */
 _RS_RUNTIME float __attribute__((overloadable)) distance(float lhs, float rhs);
-_RS_RUNTIME float __attribute__((overloadable)) distance(float2 lhs, float2 rhs);
-_RS_RUNTIME float __attribute__((overloadable)) distance(float3 lhs, float3 rhs);
-_RS_RUNTIME float __attribute__((overloadable)) distance(float4 lhs, float4 rhs);
+F_FUNC_FN_FN(distance)
 
 /**
  * Normalize a vector.
@@ -842,21 +910,78 @@
  * Supports 1,2,3,4 components
  */
 _RS_RUNTIME float __attribute__((overloadable)) normalize(float v);
-_RS_RUNTIME float2 __attribute__((overloadable)) normalize(float2 v);
-_RS_RUNTIME float3 __attribute__((overloadable)) normalize(float3 v);
-_RS_RUNTIME float4 __attribute__((overloadable)) normalize(float4 v);
+FN_FUNC_FN(normalize)
+
+
+// New approx API functions
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+
+/**
+ * Return the approximate reciprocal of a value.
+ *
+ * Supports 1,2,3,4 components
+ */
+_RS_RUNTIME float __attribute__((overloadable)) half_recip(float);
+FN_FUNC_FN(half_recip)
+
+/**
+ * Return the approximate square root of a value.
+ *
+ * Supports 1,2,3,4 components
+ */
+_RS_RUNTIME float __attribute__((overloadable)) half_sqrt(float);
+FN_FUNC_FN(half_sqrt)
+
+/**
+ * Return the approximate value of (1 / sqrt(value)).
+ *
+ * Supports 1,2,3,4 components
+ */
+_RS_RUNTIME float __attribute__((overloadable)) half_rsqrt(float v);
+FN_FUNC_FN(half_rsqrt)
+
+/**
+ * Compute the approximate length of a vector.
+ *
+ * Supports 1,2,3,4 components
+ */
+_RS_RUNTIME float __attribute__((overloadable)) fast_length(float v);
+F_FUNC_FN(fast_length)
+
+/**
+ * Compute the approximate distance between two points.
+ *
+ * Supports 1,2,3,4 components
+ */
+_RS_RUNTIME float __attribute__((overloadable)) fast_distance(float lhs, float rhs);
+F_FUNC_FN_FN(fast_distance)
+
+/**
+ * Approximately normalize a vector.
+ *
+ * Supports 1,2,3,4 components
+ */
+_RS_RUNTIME float __attribute__((overloadable)) fast_normalize(float v);
+F_FUNC_FN(fast_normalize)
+
+#endif  // (defined(RS_VERSION) && (RS_VERSION >= 17))
+
 
 #undef CVT_FUNC
 #undef CVT_FUNC_2
 #undef FN_FUNC_FN
+#undef F_FUNC_FN
 #undef IN_FUNC_FN
 #undef FN_FUNC_FN_FN
+#undef F_FUNC_FN_FN
 #undef FN_FUNC_FN_F
 #undef FN_FUNC_FN_IN
 #undef FN_FUNC_FN_I
 #undef FN_FUNC_FN_PFN
 #undef FN_FUNC_FN_PIN
 #undef FN_FUNC_FN_FN_FN
+#undef FN_FUNC_FN_FN_F
+#undef FN_FUNC_FN_F_F
 #undef FN_FUNC_FN_FN_PIN
 #undef XN_FUNC_YN
 #undef UIN_FUNC_IN
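
The consolidation above is purely declarative: FN_FUNC_FN_FN_FN plus FN_FUNC_FN_F_F (and FN_FUNC_FN_FN_F for mix) expand to the same six overloads that were previously written out by hand, so script code resolves as before. A sketch (shade is illustrative):

  static float3 shade(float3 base, float3 tint, float amount) {
      float3 mixed = mix(base, tint, amount);  // vector, vector, scalar overload
      return clamp(mixed, 0.0f, 1.0f);         // vector with scalar bounds
  }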
diff --git a/renderscript/include/rs_core.rsh b/renderscript/include/rs_core.rsh
index be900cb..2aab509 100644
--- a/renderscript/include/rs_core.rsh
+++ b/renderscript/include/rs_core.rsh
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2011-2012 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,6 +14,31 @@
  * limitations under the License.
  */
 
+ /*! \mainpage notitle
+  *
+  * Renderscript is a high-performance runtime that provides graphics rendering and
+  * compute operations at the native level. Renderscript code is compiled on devices
+  * at runtime, which also keeps it platform-independent.
+  * This reference documentation describes the Renderscript runtime APIs, which you
+  * can use to write Renderscript code in C99. The Renderscript header
+  * files are automatically included for you, except for the rs_graphics.rsh header. If
+  * you are doing graphics rendering, include the graphics header file like this:
+  *
+  * <code>#include "rs_graphics.rsh"</code>
+  *
+  * To use Renderscript, you need the Renderscript runtime APIs documented here
+  * as well as the Android framework APIs for Renderscript.
+  * For documentation on the Android framework APIs, see the <a target="_parent" href=
+  * "http://developer.android.com/reference/android/renderscript/package-summary.html">
+  * android.renderscript</a> package reference.
+  * For more information on how to develop with Renderscript and how the runtime and
+  * Android framework APIs interact, see the <a target="_parent" href=
+  * "http://developer.android.com/guide/topics/renderscript/index.html">Renderscript
+  * developer guide</a> and the <a target="_parent" href=
+  * "http://developer.android.com/resources/samples/RenderScript/index.html">
+  * Renderscript samples</a>.
+  */
+
 /** @file rs_core.rsh
  *  \brief todo-jsams
  *
@@ -31,14 +56,14 @@
 #include "rs_atomic.rsh"
 #include "rs_cl.rsh"
 #include "rs_debug.rsh"
+#include "rs_element.rsh"
 #include "rs_math.rsh"
 #include "rs_matrix.rsh"
 #include "rs_object.rsh"
 #include "rs_quaternion.rsh"
+#include "rs_sampler.rsh"
 #include "rs_time.rsh"
 
-
-
 /**
  * Send a message back to the client.  Will not block and returns true
  * if the message was sendable and false if the fifo was full.
@@ -72,12 +97,12 @@
  * This is a hint and implementations may not obey the order.
  */
 enum rs_for_each_strategy {
-    RS_FOR_EACH_STRATEGY_SERIAL,
-    RS_FOR_EACH_STRATEGY_DONT_CARE,
-    RS_FOR_EACH_STRATEGY_DST_LINEAR,
-    RS_FOR_EACH_STRATEGY_TILE_SMALL,
-    RS_FOR_EACH_STRATEGY_TILE_MEDIUM,
-    RS_FOR_EACH_STRATEGY_TILE_LARGE
+    RS_FOR_EACH_STRATEGY_SERIAL = 0,
+    RS_FOR_EACH_STRATEGY_DONT_CARE = 1,
+    RS_FOR_EACH_STRATEGY_DST_LINEAR = 2,
+    RS_FOR_EACH_STRATEGY_TILE_SMALL = 3,
+    RS_FOR_EACH_STRATEGY_TILE_MEDIUM = 4,
+    RS_FOR_EACH_STRATEGY_TILE_LARGE = 5
 };
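
Pinning the enumerator values keeps the hint ABI-stable across header revisions. The strategy is consumed through the launch options struct; a sketch, assuming the RS_VERSION >= 14 rsForEach overload that takes usrData, its length, and a launch-options pointer (gScript, gIn, gOut, launch are illustrative):

  rs_script gScript;
  rs_allocation gIn, gOut;

  void launch(void) {
      rs_script_call_t sc = {0};
      sc.strategy = RS_FOR_EACH_STRATEGY_TILE_MEDIUM;  // a hint, not a guarantee
      rsForEach(gScript, gIn, gOut, NULL, 0, &sc);
  }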
 
 
diff --git a/renderscript/include/rs_debug.rsh b/renderscript/include/rs_debug.rsh
index 074c28f..7a13c9d 100644
--- a/renderscript/include/rs_debug.rsh
+++ b/renderscript/include/rs_debug.rsh
@@ -27,7 +27,6 @@
 #define __RS_DEBUG_RSH__
 
 
-
 /**
  * Debug function.  Prints a string and value to the log.
  */
@@ -52,6 +51,21 @@
  * Debug function.  Prints a string and value to the log.
  */
 extern void __attribute__((overloadable))
+    rsDebug(const char *, float2);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, float3);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, float4);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
     rsDebug(const char *, double);
 /**
  * Debug function.  Prints a string and value to the log.
@@ -103,21 +117,151 @@
  */
 extern void __attribute__((overloadable))
     rsDebug(const char *, const void *);
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, char);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, char2);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, char3);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, char4);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, unsigned char);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, uchar2);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, uchar3);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, uchar4);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, short);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, short2);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, short3);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, short4);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, unsigned short);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, ushort2);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, ushort3);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, ushort4);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, int2);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, int3);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, int4);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, uint2);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, uint3);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, uint4);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, long2);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, long3);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, long4);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, ulong2);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, ulong3);
+/**
+ * Debug function.  Prints a string and value to the log.
+ */
+extern void __attribute__((overloadable))
+    rsDebug(const char *, ulong4);
+#endif  // (defined(RS_VERSION) && (RS_VERSION >= 17))
+
 #define RS_DEBUG(a) rsDebug(#a, a)
 #define RS_DEBUG_MARKER rsDebug(__FILE__, __LINE__)
 
-
-/**
- * Debug function.  Prints a string and value to the log.
- */
-_RS_RUNTIME void __attribute__((overloadable)) rsDebug(const char *s, float2 v);
-/**
- * Debug function.  Prints a string and value to the log.
- */
-_RS_RUNTIME void __attribute__((overloadable)) rsDebug(const char *s, float3 v);
-/**
- * Debug function.  Prints a string and value to the log.
- */
-_RS_RUNTIME void __attribute__((overloadable)) rsDebug(const char *s, float4 v);
-
 #endif
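
With the float-vector overloads now extern (and the RS_VERSION >= 17 set added), the convenience macros work for any supported type; a sketch (checkpoint is illustrative):

  void checkpoint(float4 v, int i) {
      rsDebug("v", v);   // prints all four components
      RS_DEBUG(i);       // expands to rsDebug("i", i)
      RS_DEBUG_MARKER;   // logs __FILE__ and __LINE__
  }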
diff --git a/renderscript/include/rs_element.rsh b/renderscript/include/rs_element.rsh
new file mode 100644
index 0000000..0230f10
--- /dev/null
+++ b/renderscript/include/rs_element.rsh
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file rs_element.rsh
+ *  \brief Element routines
+ *
+ *
+ */
+
+#ifndef __RS_ELEMENT_RSH__
+#define __RS_ELEMENT_RSH__
+
+// New API's
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+
+/**
+ * Elements can be simple, such as an int or a float, or a
+ * structure with multiple sub-elements, such as a collection of
+ * floats, float2, float4. This function returns zero for simple
+ * elements and the number of sub-elements otherwise.
+ *
+ * @param e element to get data from
+ * @return number of sub-elements in this element
+ */
+extern uint32_t __attribute__((overloadable))
+    rsElementGetSubElementCount(rs_element e);
+
+/**
+ * For complex elements, this function will return the
+ * sub-element at index
+ *
+ * @param e element to get data from
+ * @param index index of the sub-element to return
+ * @return sub-element in this element at given index
+ */
+extern rs_element __attribute__((overloadable))
+    rsElementGetSubElement(rs_element e, uint32_t index);
+
+/**
+ * For complex elements, this function will return the length of
+ * sub-element name at index
+ *
+ * @param e element to get data from
+ * @param index index of the sub-element to return
+ * @return length of the sub-element name including the null
+ *         terminator (size of buffer needed to write the name)
+ */
+extern uint32_t __attribute__((overloadable))
+    rsElementGetSubElementNameLength(rs_element e, uint32_t index);
+
+/**
+ * For complex elements, this function will return the
+ * sub-element name at index
+ *
+ * @param e element to get data from
+ * @param index index of the sub-element
+ * @param name array to store the name into
+ * @param nameLength length of the provided name array
+ * @return number of characters actually written, excluding the
+ *         null terminator
+ */
+extern uint32_t __attribute__((overloadable))
+    rsElementGetSubElementName(rs_element e, uint32_t index, char *name, uint32_t nameLength);
+
+/**
+ * For complex elements, some sub-elements can be statically
+ * sized arrays. This function returns the array size of the
+ * sub-element at the given index.
+ *
+ * @param e element to get data from
+ * @param index index of the sub-element
+ * @return array size of sub-element in this element at given
+ *         index
+ */
+extern uint32_t __attribute__((overloadable))
+    rsElementGetSubElementArraySize(rs_element e, uint32_t index);
+
+/**
+ * This function returns the offset in bytes of a sub-element
+ * within the element.
+ *
+ * @param e element to get data from
+ * @param index index of the sub-element
+ * @return offset in bytes of sub-element in this element at
+ *         given index
+ */
+extern uint32_t __attribute__((overloadable))
+    rsElementGetSubElementOffsetBytes(rs_element e, uint32_t index);
+
+/**
+ * Returns the size of the element in bytes
+ *
+ * @param e element to get data from
+ * @return total size of the element in bytes
+ */
+extern uint32_t __attribute__((overloadable))
+    rsElementGetBytesSize(rs_element e);
+
+/**
+ * Returns the element's data type
+ *
+ * @param e element to get data from
+ * @return element's data type
+ */
+extern rs_data_type __attribute__((overloadable))
+    rsElementGetDataType(rs_element e);
+
+/**
+ * Returns the element's data kind
+ *
+ * @param e element to get data from
+ * @return element's data kind
+ */
+extern rs_data_kind __attribute__((overloadable))
+    rsElementGetDataKind(rs_element e);
+
+/**
+ * Returns the element's vector size
+ *
+ * @param e element to get data from
+ * @return length of the element vector (for float2, float3,
+ *         etc.)
+ */
+extern uint32_t __attribute__((overloadable))
+    rsElementGetVectorSize(rs_element e);
+
+#endif // (defined(RS_VERSION) && (RS_VERSION >= 16))
+
+#endif // __RS_ELEMENT_RSH__
+
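
A sketch of how the element-introspection calls above compose (assumes RS_VERSION >= 16; the function name and the 64-byte name buffer are illustrative, and rsElementGetSubElementNameLength gives the exact buffer size needed):

    void printLayout(rs_element e) {
        uint32_t n = rsElementGetSubElementCount(e);
        if (n == 0) {
            // Simple element: report its total size and vector length.
            rsDebug("bytes", rsElementGetBytesSize(e));
            rsDebug("vecSize", rsElementGetVectorSize(e));
            return;
        }
        for (uint32_t i = 0; i < n; i++) {
            char name[64];  // assumed bound on the sub-element name length
            rsElementGetSubElementName(e, i, name, sizeof(name));
            rsDebug(name, rsElementGetSubElementOffsetBytes(e, i));
            rsDebug("  array size", rsElementGetSubElementArraySize(e, i));
        }
    }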
diff --git a/renderscript/include/rs_graphics.rsh b/renderscript/include/rs_graphics.rsh
index 2581953..44ee99f 100644
--- a/renderscript/include/rs_graphics.rsh
+++ b/renderscript/include/rs_graphics.rsh
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2011-2012 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -22,6 +22,10 @@
  */
 #ifndef __RS_GRAPHICS_RSH__
 #define __RS_GRAPHICS_RSH__
+
+#include "rs_mesh.rsh"
+#include "rs_program.rsh"
+
 #if (defined(RS_VERSION) && (RS_VERSION >= 14))
 /**
  * Set the color target used for all subsequent rendering calls
@@ -164,6 +168,28 @@
     rsgProgramFragmentConstantColor(rs_program_fragment pf, float r, float g, float b, float a);
 
 /**
+ * Bind a new Allocation object to a ProgramFragment.  The
+ * Allocation must be a valid constant input for the Program.
+ *
+ * @param ps program fragment object
+ * @param slot index of the constant buffer on the program
+ * @param c constants to bind
+ */
+extern void __attribute__((overloadable))
+    rsgBindConstant(rs_program_fragment ps, uint slot, rs_allocation c);
+
+/**
+ * Bind a new Allocation object to a ProgramVertex.  The
+ * Allocation must be a valid constant input for the Program.
+ *
+ * @param pv program vertex object
+ * @param slot index of the constant buffer on the program
+ * @param c constants to bind
+ */
+extern void __attribute__((overloadable))
+    rsgBindConstant(rs_program_vertex pv, uint slot, rs_allocation c);
+
+/**
  * Get the width of the current rendering surface.
  *
  * @return uint
@@ -288,6 +314,9 @@
 extern void __attribute__((overloadable))
     rsgDrawSpriteScreenspace(float x, float y, float z, float w, float h);
 
+/**
+ * Draw a path using the current context state.
+ *
+ * @param p path to draw
+ */
+extern void __attribute__((overloadable))
+    rsgDrawPath(rs_path p);
+
 /**
  * Draw a mesh using the current context state.  The whole mesh is
  * rendered.
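
A sketch of the new rsgBindConstant overloads in use (gPF, gPV, and gConst are illustrative script globals assumed to be set up from the Java side; slot 0 must match a constant buffer declared by each program):

    rs_program_fragment gPF;
    rs_program_vertex gPV;
    rs_allocation gConst;

    void setupConstants() {
        rsgBindConstant(gPF, 0, gConst);
        rsgBindConstant(gPV, 0, gConst);
    }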
diff --git a/renderscript/include/rs_math.rsh b/renderscript/include/rs_math.rsh
index 8117ca8..73040b3 100644
--- a/renderscript/include/rs_math.rsh
+++ b/renderscript/include/rs_math.rsh
@@ -244,5 +244,8 @@
  */
 _RS_RUNTIME float4 rsUnpackColor8888(uchar4 c);
 
+/**
+ * Convert a YUV sample to an RGBA color.
+ */
+_RS_RUNTIME uchar4 __attribute__((overloadable)) rsYuvToRGBA_uchar4(uchar y, uchar u, uchar v);
+/**
+ * \overload
+ */
+_RS_RUNTIME float4 __attribute__((overloadable)) rsYuvToRGBA_float4(uchar y, uchar u, uchar v);
+
 
 #endif
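
A sketch of the YUV helpers in a conversion kernel (a simplified root signature; real code would fetch u and v from the chroma plane rather than hard-coding neutral values):

    void root(const uchar *in, uchar4 *out, uint32_t x, uint32_t y) {
        uchar u = 128, v = 128;  // assumed neutral chroma, for brevity
        *out = rsYuvToRGBA_uchar4(*in, u, v);
    }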
diff --git a/renderscript/include/rs_mesh.rsh b/renderscript/include/rs_mesh.rsh
new file mode 100644
index 0000000..0ecd786
--- /dev/null
+++ b/renderscript/include/rs_mesh.rsh
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file rs_mesh.rsh
+ *  \brief Mesh routines
+ *
+ *
+ */
+
+#ifndef __RS_MESH_RSH__
+#define __RS_MESH_RSH__
+
+// New APIs
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+
+/**
+ * Returns the number of allocations in the mesh that contain
+ * vertex data
+ *
+ * @param m mesh to get data from
+ * @return number of allocations in the mesh that contain vertex
+ *         data
+ */
+extern uint32_t __attribute__((overloadable))
+    rsgMeshGetVertexAllocationCount(rs_mesh m);
+
+/**
+ * Meshes can have multiple index sets; this function returns
+ * how many there are.
+ *
+ * @param m mesh to get data from
+ * @return number of primitive groups in the mesh. This would
+ *         include simple primitives as well as allocations
+ *         containing index data
+ */
+extern uint32_t __attribute__((overloadable))
+    rsgMeshGetPrimitiveCount(rs_mesh m);
+
+/**
+ * Returns an allocation that is part of the mesh and contains
+ * vertex data, e.g. positions, normals, texcoords
+ *
+ * @param m mesh to get data from
+ * @param index index of the vertex allocation
+ * @return allocation containing vertex data
+ */
+extern rs_allocation __attribute__((overloadable))
+    rsgMeshGetVertexAllocation(rs_mesh m, uint32_t index);
+
+/**
+ * Returns an allocation containing index data or a null
+ * allocation if only the primitive is specified
+ *
+ * @param m mesh to get data from
+ * @param index index of the index allocation
+ * @return allocation containing index data
+ */
+extern rs_allocation __attribute__((overloadable))
+    rsgMeshGetIndexAllocation(rs_mesh m, uint32_t index);
+
+/**
+ * Returns the primitive describing how a part of the mesh is
+ * rendered
+ *
+ * @param m mesh to get data from
+ * @param index index of the primitive
+ * @return primitive describing how the mesh is rendered
+ */
+extern rs_primitive __attribute__((overloadable))
+    rsgMeshGetPrimitive(rs_mesh m, uint32_t index);
+
+#endif // (defined(RS_VERSION) && (RS_VERSION >= 16))
+
+#endif // __RS_MESH_RSH__
+
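
A sketch of walking a mesh with the functions above (assumes RS_VERSION >= 16 and a graphics script; names are illustrative, and enum results are cast to int for logging):

    void inspectMesh(rs_mesh m) {
        rsDebug("vertex allocations", rsgMeshGetVertexAllocationCount(m));
        uint32_t prims = rsgMeshGetPrimitiveCount(m);
        for (uint32_t i = 0; i < prims; i++) {
            rs_allocation idx = rsgMeshGetIndexAllocation(m, i);
            if (!rsIsObject(idx)) {
                // Null allocation: this group is described by the primitive alone.
                rsDebug("primitive only, type", (int)rsgMeshGetPrimitive(m, i));
            }
        }
    }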
diff --git a/renderscript/include/rs_object.rsh b/renderscript/include/rs_object.rsh
index a431219..1fc3f83 100644
--- a/renderscript/include/rs_object.rsh
+++ b/renderscript/include/rs_object.rsh
@@ -56,6 +56,11 @@
  * \overload
  */
 extern void __attribute__((overloadable))
+    rsSetObject(rs_path *dst, rs_path src);
+/**
+ * \overload
+ */
+extern void __attribute__((overloadable))
     rsSetObject(rs_mesh *dst, rs_mesh src);
 /**
  * \overload
@@ -114,6 +119,11 @@
  * \overload
  */
 extern void __attribute__((overloadable))
+    rsClearObject(rs_path *dst);
+/**
+ * \overload
+ */
+extern void __attribute__((overloadable))
     rsClearObject(rs_mesh *dst);
 /**
  * \overload
@@ -175,6 +185,11 @@
  * \overload
  */
 extern bool __attribute__((overloadable))
+    rsIsObject(rs_path);
+/**
+ * \overload
+ */
+extern bool __attribute__((overloadable))
     rsIsObject(rs_mesh);
 /**
  * \overload
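
The new rs_path overloads follow the same reference-counting pattern as the other object types; a minimal sketch (gPath is an illustrative script global, and the draw call assumes a graphics context):

    rs_path gPath;

    void swapPath(rs_path p) {
        rsSetObject(&gPath, p);   // take a reference on p, release the old value
        if (rsIsObject(gPath)) {
            rsgDrawPath(gPath);
        }
        rsClearObject(&gPath);    // release the reference
    }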
diff --git a/renderscript/include/rs_program.rsh b/renderscript/include/rs_program.rsh
new file mode 100644
index 0000000..299aae6
--- /dev/null
+++ b/renderscript/include/rs_program.rsh
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file rs_program.rsh
+ *  \brief Program object routines
+ *
+ *
+ */
+
+#ifndef __RS_PROGRAM_RSH__
+#define __RS_PROGRAM_RSH__
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+
+/**
+ * Get program store depth function
+ *
+ * @param ps program store to query
+ */
+extern rs_depth_func __attribute__((overloadable))
+    rsgProgramStoreGetDepthFunc(rs_program_store ps);
+
+/**
+ * Get program store depth mask
+ *
+ * @param ps program store to query
+ */
+extern bool __attribute__((overloadable))
+    rsgProgramStoreIsDepthMaskEnabled(rs_program_store ps);
+/**
+ * Get program store red component color mask
+ *
+ * @param ps program store to query
+ */
+extern bool __attribute__((overloadable))
+    rsgProgramStoreIsColorMaskRedEnabled(rs_program_store ps);
+
+/**
+ * Get program store green component color mask
+ *
+ * @param ps program store to query
+ */
+extern bool __attribute__((overloadable))
+    rsgProgramStoreIsColorMaskGreenEnabled(rs_program_store ps);
+
+/**
+ * Get program store blue component color mask
+ *
+ * @param ps program store to query
+ */
+extern bool __attribute__((overloadable))
+    rsgProgramStoreIsColorMaskBlueEnabled(rs_program_store ps);
+
+/**
+ * Get program store alpha component color mask
+ *
+ * @param ps program store to query
+ */
+extern bool __attribute__((overloadable))
+    rsgProgramStoreIsColorMaskAlphaEnabled(rs_program_store ps);
+
+/**
+ * Get program store blend source function
+ *
+ * @param ps program store to query
+ */
+extern rs_blend_src_func __attribute__((overloadable))
+    rsgProgramStoreGetBlendSrcFunc(rs_program_store ps);
+
+/**
+ * Get program store blend destination function
+ *
+ * @param ps program store to query
+ */
+extern rs_blend_dst_func __attribute__((overloadable))
+    rsgProgramStoreGetBlendDstFunc(rs_program_store ps);
+
+/**
+ * Get program store dither state
+ *
+ * @param ps program store to query
+ */
+extern bool __attribute__((overloadable))
+    rsgProgramStoreIsDitherEnabled(rs_program_store ps);
+
+/**
+ * Get program raster point sprite state
+ *
+ * @param pr program raster to query
+ */
+extern bool __attribute__((overloadable))
+    rsgProgramRasterIsPointSpriteEnabled(rs_program_raster pr);
+
+/**
+ * Get program raster cull mode
+ *
+ * @param pr program raster to query
+ */
+extern rs_cull_mode __attribute__((overloadable))
+    rsgProgramRasterGetCullMode(rs_program_raster pr);
+
+#endif // (defined(RS_VERSION) && (RS_VERSION >= 16))
+
+#endif // __RS_PROGRAM_RSH__
+
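
A sketch that exercises the program store queries above (assumes RS_VERSION >= 16; booleans and enums are cast to int because that rsDebug overload exists at every API level):

    void dumpStoreState(rs_program_store ps) {
        rsDebug("depth func", (int)rsgProgramStoreGetDepthFunc(ps));
        rsDebug("depth mask", (int)rsgProgramStoreIsDepthMaskEnabled(ps));
        rsDebug("blend src", (int)rsgProgramStoreGetBlendSrcFunc(ps));
        rsDebug("blend dst", (int)rsgProgramStoreGetBlendDstFunc(ps));
        rsDebug("dither", (int)rsgProgramStoreIsDitherEnabled(ps));
    }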
diff --git a/renderscript/include/rs_sampler.rsh b/renderscript/include/rs_sampler.rsh
new file mode 100644
index 0000000..2ff426c
--- /dev/null
+++ b/renderscript/include/rs_sampler.rsh
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file rs_sampler.rsh
+ *  \brief Sampler routines
+ *
+ *
+ */
+
+#ifndef __RS_SAMPLER_RSH__
+#define __RS_SAMPLER_RSH__
+
+// New APIs
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+
+/**
+ * Get sampler minification value
+ *
+ * @param s sampler to query
+ * @return minification value
+ */
+extern rs_sampler_value __attribute__((overloadable))
+    rsSamplerGetMinification(rs_sampler s);
+
+/**
+ * Get sampler magnification value
+ *
+ * @param s sampler to query
+ * @return magnification value
+ */
+extern rs_sampler_value __attribute__((overloadable))
+    rsSamplerGetMagnification(rs_sampler s);
+
+/**
+ * Get sampler wrap S value
+ *
+ * @param s sampler to query
+ * @return wrap S value
+ */
+extern rs_sampler_value __attribute__((overloadable))
+    rsSamplerGetWrapS(rs_sampler s);
+
+/**
+ * Get sampler wrap T value
+ *
+ * @param s sampler to query
+ * @return wrap T value
+ */
+extern rs_sampler_value __attribute__((overloadable))
+    rsSamplerGetWrapT(rs_sampler s);
+
+/**
+ * Get sampler anisotropy
+ *
+ * @param s sampler to query
+ * @return anisotropy
+ */
+extern float __attribute__((overloadable))
+    rsSamplerGetAnisotropy(rs_sampler s);
+
+#endif // (defined(RS_VERSION) && (RS_VERSION >= 16))
+
+#endif // __RS_SAMPLER_RSH__
+
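
A matching sketch for the sampler queries (same assumptions as the program store example; enum results are cast to int for logging):

    void dumpSampler(rs_sampler s) {
        rsDebug("min", (int)rsSamplerGetMinification(s));
        rsDebug("mag", (int)rsSamplerGetMagnification(s));
        rsDebug("wrapS", (int)rsSamplerGetWrapS(s));
        rsDebug("wrapT", (int)rsSamplerGetWrapT(s));
        rsDebug("aniso", rsSamplerGetAnisotropy(s));
    }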
diff --git a/renderscript/include/rs_types.rsh b/renderscript/include/rs_types.rsh
index e9c3c5e..5c99313 100644
--- a/renderscript/include/rs_types.rsh
+++ b/renderscript/include/rs_types.rsh
@@ -41,7 +41,20 @@
 #ifndef __RS_TYPES_RSH__
 #define __RS_TYPES_RSH__
 
-#define M_PI        3.14159265358979323846264338327950288f   /* pi */
+/* Constants */
+#define M_E         2.718281828459045235360287471352662498f     /* e */
+#define M_LOG2E     1.442695040888963407359924681001892137f     /* log_2 e */
+#define M_LOG10E    0.434294481903251827651128918916605082f     /* log_10 e */
+#define M_LN2       0.693147180559945309417232121458176568f     /* log_e 2 */
+#define M_LN10      2.302585092994045684017991454684364208f     /* log_e 10 */
+#define M_PI        3.141592653589793238462643383279502884f     /* pi */
+#define M_PI_2      1.570796326794896619231321691639751442f     /* pi/2 */
+#define M_PI_4      0.785398163397448309615660845819875721f     /* pi/4 */
+#define M_1_PI      0.318309886183790671537767526745028724f     /* 1/pi */
+#define M_2_PI      0.636619772367581343075535053490057448f     /* 2/pi */
+#define M_2_SQRTPI  1.128379167095512573896158903121545172f     /* 2/sqrt(pi) */
+#define M_SQRT2     1.414213562373095048801688724209698079f     /* sqrt(2) */
+#define M_SQRT1_2   0.707106781186547524400844362104849039f     /* 1/sqrt(2) */
 
 #include "stdbool.h"
 /**
@@ -138,6 +151,12 @@
  */
 typedef struct { const int* const p; } __attribute__((packed, aligned(4))) rs_mesh;
 /**
+ * \brief Opaque handle to a Renderscript Path object.
+ *
+ * See: android.renderscript.Path
+ */
+typedef struct { const int* const p; } __attribute__((packed, aligned(4))) rs_path;
+/**
  * \brief Opaque handle to a Renderscript ProgramFragment object.
  *
  * See: android.renderscript.ProgramFragment
@@ -364,7 +383,7 @@
 typedef float4 rs_quaternion;
 
 #define RS_PACKED __attribute__((packed, aligned(4)))
-#define NULL ((const void *)0)
+#define NULL ((void *)0)
 
 #if (defined(RS_VERSION) && (RS_VERSION >= 14))
 
@@ -396,4 +415,212 @@
 
 #endif //defined(RS_VERSION) && (RS_VERSION >= 14)
 
-#endif
+// New APIs
+#if (defined(RS_VERSION) && (RS_VERSION >= 16))
+
+/**
+ * Describes the way mesh vertex data is interpreted when rendering
+ *
+ */
+typedef enum {
+    /**
+    * Vertex data will be rendered as a series of points
+    */
+    RS_PRIMITIVE_POINT              = 0,
+    /**
+    * Vertex pairs will be rendered as lines
+    */
+    RS_PRIMITIVE_LINE               = 1,
+    /**
+    * Vertex data will be rendered as a connected line strip
+    */
+    RS_PRIMITIVE_LINE_STRIP         = 2,
+    /**
+    * Vertices will be rendered as individual triangles
+    */
+    RS_PRIMITIVE_TRIANGLE           = 3,
+    /**
+    * Vertices will be rendered as a connected triangle strip
+    * defined by the first three vertices with each additional
+    * triangle defined by a new vertex
+    */
+    RS_PRIMITIVE_TRIANGLE_STRIP     = 4,
+    /**
+    * Vertices will be rendered as a sequence of triangles that all
+    * share the first vertex as the origin
+    */
+    RS_PRIMITIVE_TRIANGLE_FAN       = 5,
+
+    /**
+    * Invalid primitive
+    */
+    RS_PRIMITIVE_INVALID            = 100,
+} rs_primitive;
+
+/**
+ * \brief Enumeration for possible element data types
+ *
+ * DataType represents the basic type information for a basic element.  The
+ * naming convention is as follows: for numeric types the name is FLOAT,
+ * SIGNED, or UNSIGNED followed by _BITS, where BITS is the size of the
+ * data.  BOOLEAN is a true / false (1,0) value represented in an 8 bit
+ * container.  The UNSIGNED variants with multiple bit definitions are for
+ * packed graphical data formats and represent vectors whose members have
+ * the indicated per-member sizes; these are treated as a single unit for
+ * packing and alignment purposes.
+ *
+ * MATRIX: the three matrix types contain FLOAT_32 elements and are treated
+ * as 32 bits for alignment purposes.
+ *
+ * RS_* objects are 32 bit opaque handles.
+ */
+typedef enum {
+    RS_TYPE_NONE             = 0,
+    RS_TYPE_FLOAT_32         = 2,
+    RS_TYPE_FLOAT_64         = 3,
+    RS_TYPE_SIGNED_8         = 4,
+    RS_TYPE_SIGNED_16        = 5,
+    RS_TYPE_SIGNED_32        = 6,
+    RS_TYPE_SIGNED_64        = 7,
+    RS_TYPE_UNSIGNED_8       = 8,
+    RS_TYPE_UNSIGNED_16      = 9,
+    RS_TYPE_UNSIGNED_32      = 10,
+    RS_TYPE_UNSIGNED_64      = 11,
+
+    RS_TYPE_BOOLEAN          = 12,
+
+    RS_TYPE_UNSIGNED_5_6_5   = 13,
+    RS_TYPE_UNSIGNED_5_5_5_1 = 14,
+    RS_TYPE_UNSIGNED_4_4_4_4 = 15,
+
+    RS_TYPE_MATRIX_4X4       = 16,
+    RS_TYPE_MATRIX_3X3       = 17,
+    RS_TYPE_MATRIX_2X2       = 18,
+
+    RS_TYPE_ELEMENT          = 1000,
+    RS_TYPE_TYPE             = 1001,
+    RS_TYPE_ALLOCATION       = 1002,
+    RS_TYPE_SAMPLER          = 1003,
+    RS_TYPE_SCRIPT           = 1004,
+    RS_TYPE_MESH             = 1005,
+    RS_TYPE_PROGRAM_FRAGMENT = 1006,
+    RS_TYPE_PROGRAM_VERTEX   = 1007,
+    RS_TYPE_PROGRAM_RASTER   = 1008,
+    RS_TYPE_PROGRAM_STORE    = 1009,
+    RS_TYPE_FONT             = 1010,
+
+    RS_TYPE_INVALID          = 10000,
+} rs_data_type;
+
+/**
+ * \brief Enumeration for possible element data kinds
+ *
+ * The special interpretation of the data, if required.  This is primarily
+ * useful for graphical data.  USER indicates no special interpretation is
+ * expected.  PIXEL is used in conjunction with the standard data types for
+ * representing texture formats.
+ */
+typedef enum {
+    RS_KIND_USER         = 0,
+
+    RS_KIND_PIXEL_L      = 7,
+    RS_KIND_PIXEL_A      = 8,
+    RS_KIND_PIXEL_LA     = 9,
+    RS_KIND_PIXEL_RGB    = 10,
+    RS_KIND_PIXEL_RGBA   = 11,
+    RS_KIND_PIXEL_DEPTH  = 12,
+
+    RS_KIND_INVALID      = 100,
+} rs_data_kind;
+
+typedef enum {
+    /**
+    * Always drawn
+    */
+    RS_DEPTH_FUNC_ALWAYS        = 0,
+    /**
+    * Drawn if the incoming depth value is less than that in the
+    * depth buffer
+    */
+    RS_DEPTH_FUNC_LESS          = 1,
+    /**
+    * Drawn if the incoming depth value is less than or equal to that in
+    * the depth buffer
+    */
+    RS_DEPTH_FUNC_LEQUAL        = 2,
+    /**
+    * Drawn if the incoming depth value is greater than that in the
+    * depth buffer
+    */
+    RS_DEPTH_FUNC_GREATER       = 3,
+    /**
+    * Drawn if the incoming depth value is greater than or equal to that
+    * in the depth buffer
+    */
+    RS_DEPTH_FUNC_GEQUAL        = 4,
+    /**
+    * Drawn if the incoming depth value is equal to that in the
+    * depth buffer
+    */
+    RS_DEPTH_FUNC_EQUAL         = 5,
+    /**
+    * Drawn if the incoming depth value is not equal to that in the
+    * depth buffer
+    */
+    RS_DEPTH_FUNC_NOTEQUAL      = 6,
+    /**
+    * Invalid depth function
+    */
+    RS_DEPTH_FUNC_INVALID       = 100,
+} rs_depth_func;
+
+typedef enum {
+    RS_BLEND_SRC_ZERO                   = 0,
+    RS_BLEND_SRC_ONE                    = 1,
+    RS_BLEND_SRC_DST_COLOR              = 2,
+    RS_BLEND_SRC_ONE_MINUS_DST_COLOR    = 3,
+    RS_BLEND_SRC_SRC_ALPHA              = 4,
+    RS_BLEND_SRC_ONE_MINUS_SRC_ALPHA    = 5,
+    RS_BLEND_SRC_DST_ALPHA              = 6,
+    RS_BLEND_SRC_ONE_MINUS_DST_ALPHA    = 7,
+    RS_BLEND_SRC_SRC_ALPHA_SATURATE     = 8,
+
+    RS_BLEND_SRC_INVALID                = 100,
+} rs_blend_src_func;
+
+typedef enum {
+    RS_BLEND_DST_ZERO                   = 0,
+    RS_BLEND_DST_ONE                    = 1,
+    RS_BLEND_DST_SRC_COLOR              = 2,
+    RS_BLEND_DST_ONE_MINUS_SRC_COLOR    = 3,
+    RS_BLEND_DST_SRC_ALPHA              = 4,
+    RS_BLEND_DST_ONE_MINUS_SRC_ALPHA    = 5,
+    RS_BLEND_DST_DST_ALPHA              = 6,
+    RS_BLEND_DST_ONE_MINUS_DST_ALPHA    = 7,
+
+    RS_BLEND_DST_INVALID                = 100,
+} rs_blend_dst_func;
+
+typedef enum {
+    RS_CULL_BACK     = 0,
+    RS_CULL_FRONT    = 1,
+    RS_CULL_NONE     = 2,
+
+    RS_CULL_INVALID  = 100,
+} rs_cull_mode;
+
+typedef enum {
+    RS_SAMPLER_NEAREST              = 0,
+    RS_SAMPLER_LINEAR               = 1,
+    RS_SAMPLER_LINEAR_MIP_LINEAR    = 2,
+    RS_SAMPLER_WRAP                 = 3,
+    RS_SAMPLER_CLAMP                = 4,
+    RS_SAMPLER_LINEAR_MIP_NEAREST   = 5,
+
+    RS_SAMPLER_INVALID              = 100,
+} rs_sampler_value;
+
+#endif // (defined(RS_VERSION) && (RS_VERSION >= 16))
+
+#endif // __RS_TYPES_RSH__
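
A sketch tying the new enums back to the element-introspection API (assumes RS_VERSION >= 16; the predicate name is illustrative):

    // True if the element describes a packed RGB565 pixel.
    bool isRGB565(rs_element e) {
        return (rsElementGetDataKind(e) == RS_KIND_PIXEL_RGB) &&
               (rsElementGetDataType(e) == RS_TYPE_UNSIGNED_5_6_5);
    }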
diff --git a/tools/darwin/llvm-rs-cc b/tools/darwin/llvm-rs-cc
index a44b92d..35a093e 100755
--- a/tools/darwin/llvm-rs-cc
+++ b/tools/darwin/llvm-rs-cc
Binary files differ
diff --git a/tools/linux/llvm-rs-cc b/tools/linux/llvm-rs-cc
index 6aa7641..c8bef23 100755
--- a/tools/linux/llvm-rs-cc
+++ b/tools/linux/llvm-rs-cc
Binary files differ