Split the main for-each-use loop again, this time for GenerateTruncates,
as it also peeks at which registers are being used by other uses. This
makes LSR less sensitive to use-list order.
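
A minimal standalone sketch of the order-sensitivity (Use and
regUsedByOtherUses below are hypothetical stand-ins, not LLVM's LSRUse
or hasRegsUsedByUsesOtherThan):

    // Illustrative only: shows why interleaving formula generation
    // with cross-use register queries makes the answers depend on
    // use-list order, and why a separate second pass does not.
    #include <cstddef>
    #include <iostream>
    #include <set>
    #include <vector>

    struct Use { std::set<int> Regs; };

    // True if Reg appears in some use other than Uses[Idx].
    static bool regUsedByOtherUses(const std::vector<Use> &Uses,
                                   std::size_t Idx, int Reg) {
      for (std::size_t i = 0; i != Uses.size(); ++i)
        if (i != Idx && Uses[i].Regs.count(Reg))
          return true;
      return false;
    }

    int main() {
      std::cout << std::boolalpha;

      // One pass, generation interleaved with queries: use 0 asks
      // before use 1 has generated anything, so it misses the shared
      // register; use 1 asks afterward and sees it. Order-sensitive.
      std::vector<Use> OnePass(2);
      for (std::size_t i = 0; i != OnePass.size(); ++i) {
        std::cout << "one-pass, use " << i << ": "
                  << regUsedByOtherUses(OnePass, i, 42) << '\n'; // false, true
        OnePass[i].Regs.insert(42); // "generate" a formula using reg 42
      }

      // Two passes, generate everything first, then query: every use
      // sees the complete register sets, regardless of list order.
      std::vector<Use> TwoPass(2);
      for (Use &U : TwoPass)
        U.Regs.insert(42);
      for (std::size_t i = 0; i != TwoPass.size(); ++i)
        std::cout << "two-pass, use " << i << ": "
                  << regUsedByOtherUses(TwoPass, i, 42) << '\n'; // true, true
    }

Moving GenerateTruncates into its own pass corresponds to the two-pass
shape above: by the time it runs, every use's formulae exist, so the
register queries return the same answers regardless of use-list order.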

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@96308 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 3e03781..240b298 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -2427,7 +2427,7 @@
 /// GenerateAllReuseFormulae - Generate formulae for each use.
 void
 LSRInstance::GenerateAllReuseFormulae() {
-  // This is split into two loops so that hasRegsUsedByUsesOtherThan
+  // This is split into multiple loops so that hasRegsUsedByUsesOtherThan
   // queries are more precise.
   for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
     LSRUse &LU = Uses[LUIdx];
@@ -2446,6 +2446,9 @@
       GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]);
     for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
       GenerateScales(LU, LUIdx, LU.Formulae[i]);
+  }
+  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
+    LSRUse &LU = Uses[LUIdx];
     for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
       GenerateTruncates(LU, LUIdx, LU.Formulae[i]);
   }
diff --git a/test/CodeGen/X86/lsr-reuse-trunc.ll b/test/CodeGen/X86/lsr-reuse-trunc.ll
new file mode 100644
index 0000000..d1d7144
--- /dev/null
+++ b/test/CodeGen/X86/lsr-reuse-trunc.ll
@@ -0,0 +1,59 @@
+; RUN: llc < %s -march=x86-64 | FileCheck %s
+
+; Full strength reduction wouldn't reduce register pressure, so LSR should
+; stick with indexing here.
+
+; CHECK: movaps        (%rsi,%rax,4), %xmm3
+; CHECK: movaps        %xmm3, (%rdi,%rax,4)
+; CHECK: addq  $4, %rax
+; CHECK: cmpl  %eax, (%rdx)
+; CHECK-NEXT: jg
+
+define void @vvfloorf(float* nocapture %y, float* nocapture %x, i32* nocapture %n) nounwind {
+entry:
+  %0 = load i32* %n, align 4
+  %1 = icmp sgt i32 %0, 0
+  br i1 %1, label %bb, label %return
+
+bb:
+  %indvar = phi i64 [ %indvar.next, %bb ], [ 0, %entry ]
+  %tmp = shl i64 %indvar, 2
+  %scevgep = getelementptr float* %y, i64 %tmp
+  %scevgep9 = bitcast float* %scevgep to <4 x float>*
+  %scevgep10 = getelementptr float* %x, i64 %tmp
+  %scevgep1011 = bitcast float* %scevgep10 to <4 x float>*
+  %2 = load <4 x float>* %scevgep1011, align 16
+  %3 = bitcast <4 x float> %2 to <4 x i32>
+  %4 = and <4 x i32> %3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
+  %5 = bitcast <4 x i32> %4 to <4 x float>
+  %6 = and <4 x i32> %3, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
+  %7 = tail call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %5, <4 x float> <float 8.388608e+06, float 8.388608e+06, float 8.388608e+06, float 8.388608e+06>, i8 5) nounwind
+  %tmp.i4 = bitcast <4 x float> %7 to <4 x i32>
+  %8 = xor <4 x i32> %tmp.i4, <i32 -1, i32 -1, i32 -1, i32 -1>
+  %9 = and <4 x i32> %8, <i32 1258291200, i32 1258291200, i32 1258291200, i32 1258291200>
+  %10 = or <4 x i32> %9, %6
+  %11 = bitcast <4 x i32> %10 to <4 x float>
+  %12 = fadd <4 x float> %2, %11
+  %13 = fsub <4 x float> %12, %11
+  %14 = tail call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %2, <4 x float> %13, i8 1) nounwind
+  %15 = bitcast <4 x float> %14 to <4 x i32>
+  %16 = tail call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %15) nounwind readnone
+  %17 = fadd <4 x float> %13, %16
+  %tmp.i = bitcast <4 x float> %17 to <4 x i32>
+  %18 = or <4 x i32> %tmp.i, %6
+  %19 = bitcast <4 x i32> %18 to <4 x float>
+  store <4 x float> %19, <4 x float>* %scevgep9, align 16
+  %tmp12 = add i64 %tmp, 4
+  %tmp13 = trunc i64 %tmp12 to i32
+  %20 = load i32* %n, align 4
+  %21 = icmp sgt i32 %20, %tmp13
+  %indvar.next = add i64 %indvar, 1
+  br i1 %21, label %bb, label %return
+
+return:
+  ret void
+}
+
+declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone
+
+declare <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32>) nounwind readnone