llvm.org GIT mirror llvm / ed79607
[PR29121] Don't fold if it would produce atomic vector loads or stores. The instcombine code which folds loads and stores into their use types can trip up if the use is a bitcast to a type which we can't directly load or store in the IR. In principle, such types shouldn't exist, but in practice they do today. This is a workaround to avoid a bug while we work towards the long-term goal. Differential Revision: https://reviews.llvm.org/D24365 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@288415 91177308-0d34-0410-b5e6-96231b3b80d8 Philip Reames 3 years ago
2 changed file(s) with 50 addition(s) and 16 deletion(s). Raw diff Collapse all Expand all
307307 return visitAllocSite(AI);
308308 }
309309
310 // Are we allowed to form a atomic load or store of this type?
311 static bool isSupportedAtomicType(Type *Ty) {
312 return Ty->isIntegerTy() || Ty->isPointerTy() || Ty->isFloatingPointTy();
313 }
314
310315 /// \brief Helper to combine a load to a new type.
311316 ///
312317 /// This just does the work of combining a load to a new type. It handles
318323 /// point the \c InstCombiner currently is using.
319324 static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewTy,
320325 const Twine &Suffix = "") {
326 assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
327 "can't fold an atomic load to requested type");
328
321329 Value *Ptr = LI.getPointerOperand();
322330 unsigned AS = LI.getPointerAddressSpace();
323331 SmallVector, 8> MD;
399407 ///
400408 /// Returns the newly created store instruction.
401409 static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value *V) {
410 assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
411 "can't fold an atomic store of requested type");
412
402413 Value *Ptr = SI.getPointerOperand();
403414 unsigned AS = SI.getPointerAddressSpace();
404415 SmallVector, 8> MD;
513524 // as long as those are noops (i.e., the source or dest type have the same
514525 // bitwidth as the target's pointers).
515526 if (LI.hasOneUse())
516 if (auto* CI = dyn_cast(LI.user_back())) {
517 if (CI->isNoopCast(DL)) {
518 LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
519 CI->replaceAllUsesWith(NewLoad);
520 IC.eraseInstFromFunction(*CI);
521 return &LI;
522 }
523 }
527 if (auto* CI = dyn_cast(LI.user_back()))
528 if (CI->isNoopCast(DL))
529 if (!LI.isAtomic() || isSupportedAtomicType(CI->getDestTy())) {
530 LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
531 CI->replaceAllUsesWith(NewLoad);
532 IC.eraseInstFromFunction(*CI);
533 return &LI;
534 }
524535
525536 // FIXME: We should also canonicalize loads of vectors when their elements are
526537 // cast to other types.
10251036 // Fold away bit casts of the stored value by storing the original type.
10261037 if (auto *BC = dyn_cast(V)) {
10271038 V = BC->getOperand(0);
1028 combineStoreToNewValue(IC, SI, V);
1029 return true;
1030 }
1031
1032 if (Value *U = likeBitCastFromVector(IC, V)) {
1033 combineStoreToNewValue(IC, SI, U);
1034 return true;
1035 }
1039 if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
1040 combineStoreToNewValue(IC, SI, V);
1041 return true;
1042 }
1043 }
1044
1045 if (Value *U = likeBitCastFromVector(IC, V))
1046 if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
1047 combineStoreToNewValue(IC, SI, U);
1048 return true;
1049 }
10361050
10371051 // FIXME: We should also canonicalize stores of vectors when their elements
10381052 // are cast to other types.
266266 store atomic i8* %l, i8** %p2 seq_cst, align 8
267267 ret void
268268 }
269
270 ;; At the moment, we can't form atomic vectors by folding since these are
271 ;; not representable in the IR. This was PR29121. The right long-term
272 ;; solution is to extend the IR to handle this case.
;; The label directive below originally read "; CHECK-LABEL @..." — without
;; the colon FileCheck does not treat the line as a directive, so the label
;; check was silently inert. Fixed to the proper "CHECK-LABEL:" spelling.
define <2 x float> @no_atomic_vector_load(i64* %p) {
; CHECK-LABEL: @no_atomic_vector_load
; CHECK: load atomic i64, i64* %p unordered, align 8
  %load = load atomic i64, i64* %p unordered, align 8
  %.cast = bitcast i64 %load to <2 x float>
  ret <2 x float> %.cast
}
280
;; Store counterpart: the bitcast feeding the atomic store must not be folded
;; into an atomic store of <2 x float>; the CHECK expects the i64 store to
;; survive instcombine unchanged.
define void @no_atomic_vector_store(<2 x float> %p, i8* %p2) {
; CHECK-LABEL: @no_atomic_vector_store
; CHECK: store atomic i64 %1, i64* %2 unordered, align 8
  %1 = bitcast <2 x float> %p to i64
  %2 = bitcast i8* %p2 to i64*
  store atomic i64 %1, i64* %2 unordered, align 8
  ret void
}