Fix a bug that prevented x86-64 from using rep.movsq for
8-byte-aligned data: the QWORD paths masked Align with 0xF
(a 16-byte-alignment test) instead of 0x7.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@49571 91177308-0d34-0410-b5e6-96231b3b80d8
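
For a power-of-two width N, N-byte alignment holds exactly when the low
log2(N) bits of Align are clear, i.e. (Align & (N - 1)) == 0; hence 0x7
tests QWORD (8-byte) alignment while 0xF tests 16-byte alignment. A
minimal standalone C++ sketch of the check (isAligned is an illustrative
name, not LLVM's):

    #include <cassert>

    // N-byte alignment (N a power of two) holds when the low
    // log2(N) bits are clear: (Align & (N - 1)) == 0.
    static bool isAligned(unsigned Align, unsigned N) {
      return (Align & (N - 1)) == 0;
    }

    int main() {
      assert(!isAligned(8, 16)); // old mask 0xF: 8-byte-aligned data rejected
      assert(isAligned(8, 8));   // fixed mask 0x7: accepted
      assert(isAligned(16, 8));  // 16-byte-aligned data still qualifies
      assert(!isAligned(4, 8));  // DWORD-only alignment falls back
    }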
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 1ab2727..66384f9 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -4621,7 +4621,7 @@
ValReg = X86::EAX;
Val = (Val << 8) | Val;
Val = (Val << 16) | Val;
- if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) { // QWORD aligned
+ if (Subtarget->is64Bit() && ((Align & 0x7) == 0)) { // QWORD aligned
AVT = MVT::i64;
ValReg = X86::RAX;
Val = (Val << 32) | Val;
@@ -4740,7 +4740,7 @@
break;
case 0: // DWORD aligned
AVT = MVT::i32;
- if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) // QWORD aligned
+ if (Subtarget->is64Bit() && ((Align & 0x7) == 0)) // QWORD aligned
AVT = MVT::i64;
break;
default: // Byte aligned
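
Both hunks make the same mask correction. The first sits on what appears
to be the memset lowering (it replicates a byte value into EAX/RAX for a
rep stos); the second is the operand-size switch for the rep movs copy
that the test below exercises. A hedged mirror of that switch (pickWidth
and Is64Bit are illustrative names, not LLVM's; the real code assigns
AVT and also handles a WORD case outside this hunk):

    #include <cstdio>

    static unsigned pickWidth(bool Is64Bit, unsigned Align) {
      switch (Align & 3) {
      case 0: // DWORD aligned
        if (Is64Bit && ((Align & 0x7) == 0)) // QWORD aligned (was 0xF)
          return 8;                          // rep movsq
        return 4;                            // rep movsl
      default: // byte aligned
        return 1;                            // rep movsb
      }
    }

    int main() {
      std::printf("%u\n", pickWidth(true, 8));  // 8: the fixed case; 0xF gave 4
      std::printf("%u\n", pickWidth(true, 16)); // 8: unchanged by the fix
      std::printf("%u\n", pickWidth(false, 8)); // 4: 32-bit never uses movsq
    }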
diff --git a/test/CodeGen/X86/byval2.ll b/test/CodeGen/X86/byval2.ll
index d017aa5..f438160 100644
--- a/test/CodeGen/X86/byval2.ll
+++ b/test/CodeGen/X86/byval2.ll
@@ -1,4 +1,4 @@
-; RUN: llvm-as < %s | llc -march=x86-64 | grep rep.movsl | count 2
+; RUN: llvm-as < %s | llc -march=x86-64 | grep rep.movsq | count 2
; RUN: llvm-as < %s | llc -march=x86 | grep rep.movsl | count 2
%struct.s = type { i64, i64, i64 }
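
%struct.s is three i64 fields, 24 bytes with 8-byte natural alignment, so
on x86-64 each byval copy should now lower to rep movsq (three quadwords)
rather than rep movsl (six doublewords); 32-bit x86 keeps rep movsl. The
dot in the grep patterns is a regex wildcard, so rep.movsq matches whatever
separator llc prints between the rep prefix and the movsq mnemonic.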