llvm.org GIT mirror llvm / 710e719
Merging r214800: ------------------------------------------------------------------------ r214800 | wschmidt | 2014-08-04 18:21:01 -0500 (Mon, 04 Aug 2014) | 13 lines [PPC64LE] Fix wrong IR for vec_sld and vec_vsldoi My original LE implementation of the vsldoi instruction, with its altivec.h interfaces vec_sld and vec_vsldoi, produces incorrect shufflevector operations in the LLVM IR. Correct code is generated because the back end handles the incorrect shufflevector in a consistent manner. This patch and a companion patch for Clang correct this problem by removing the fixup from altivec.h and the corresponding fixup from the PowerPC back end. Several test cases are also modified to reflect the now-correct LLVM IR. ------------------------------------------------------------------------ git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/release_35@214821 91177308-0d34-0410-b5e6-96231b3b80d8 Bill Schmidt 5 years ago
2 changed file(s) with 14 addition(s) and 32 deletion(s). Raw diff Collapse all Expand all
10061006
10071007 if (DAG.getTarget().getDataLayout()->isLittleEndian()) {
10081008
1009 ShiftAmt += i;
1010
1011 if (!isUnary) {
1012 // Check the rest of the elements to see if they are consecutive.
1013 for (++i; i != 16; ++i)
1014 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt - i))
1015 return -1;
1016 } else {
1017 // Check the rest of the elements to see if they are consecutive.
1018 for (++i; i != 16; ++i)
1019 if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt - i) & 15))
1020 return -1;
1021 }
1022
1023 } else { // Big Endian
1024
1025 ShiftAmt -= i;
1026
1027 if (!isUnary) {
1028 // Check the rest of the elements to see if they are consecutive.
1029 for (++i; i != 16; ++i)
1030 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
1031 return -1;
1032 } else {
1033 // Check the rest of the elements to see if they are consecutive.
1034 for (++i; i != 16; ++i)
1035 if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
1036 return -1;
1037 }
1038 }
1009 if (!isUnary) {
1010 // Check the rest of the elements to see if they are consecutive.
1011 for (++i; i != 16; ++i)
1012 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
1013 return -1;
1014 } else {
1015 // Check the rest of the elements to see if they are consecutive.
1016 for (++i; i != 16; ++i)
1017 if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
1018 return -1;
1019 }
1020
10391021 return ShiftAmt;
10401022 }
10411023
188188 ; CHECK: VSLDOI_xy:
189189 %tmp = load <16 x i8>* %A
190190 %tmp2 = load <16 x i8>* %B
191 %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4>
191 %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27>
192192 ; CHECK: vsldoi
193193 store <16 x i8> %tmp3, <16 x i8>* %A
194194 ret void
198198 entry:
199199 ; CHECK: VSLDOI_xx:
200200 %tmp = load <16 x i8>* %A
201 %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4>
201 %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
202202 ; CHECK: vsldoi
203203 store <16 x i8> %tmp2, <16 x i8>* %A
204204 ret void