llvm.org GIT mirror llvm / a69b827
TTI: Improve default costs for addrspacecast For some reason multiple places need to do this, and the variant the loop unroller and inliner use was not handling it. Also, introduce a new wrapper to be slightly more precise, since on AMDGPU some addrspacecasts are free, but not no-ops. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@362436 91177308-0d34-0410-b5e6-96231b3b80d8 Matt Arsenault 3 months ago
7 changed file(s) with 119 addition(s) and 14 deletion(s). Raw diff Collapse all Expand all
412412 if (TLI->isZExtFree(OpTy, Ty))
413413 return TargetTransformInfo::TCC_Free;
414414 return TargetTransformInfo::TCC_Basic;
415
416 case Instruction::AddrSpaceCast:
417 if (TLI->isFreeAddrSpaceCast(OpTy->getPointerAddressSpace(),
418 Ty->getPointerAddressSpace()))
419 return TargetTransformInfo::TCC_Free;
420 return TargetTransformInfo::TCC_Basic;
415421 }
416422
417423 return BaseT::getOperationCost(Opcode, Ty, OpTy);
655661 return 0;
656662
657663 if (Opcode == Instruction::AddrSpaceCast &&
658 TLI->isNoopAddrSpaceCast(Src->getPointerAddressSpace(),
664 TLI->isFreeAddrSpaceCast(Src->getPointerAddressSpace(),
659665 Dst->getPointerAddressSpace()))
660666 return 0;
661667
15901590 }
15911591
15921592 /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
1593 /// are happy to sink it into basic blocks.
1594 virtual bool isCheapAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
1593 /// are happy to sink it into basic blocks. A cast may be free, but not
1594 /// necessarily a no-op. e.g. a free truncate from a 64-bit to 32-bit pointer.
1595 virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
15951596 return isNoopAddrSpaceCast(SrcAS, DestAS);
15961597 }
15971598
11391139 // Sink only "cheap" (or nop) address-space casts. This is a weaker condition
11401140 // than sinking only nop casts, but is helpful on some platforms.
11411141 if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) {
1142 if (!TLI.isCheapAddrSpaceCast(ASC->getSrcAddressSpace(),
1143 ASC->getDestAddressSpace()))
1142 if (!TLI.isFreeAddrSpaceCast(ASC->getSrcAddressSpace(),
1143 ASC->getDestAddressSpace()))
11441144 return false;
11451145 }
11461146
12601260 return I && I->getMetadata("amdgpu.noclobber");
12611261 }
12621262
1263 bool SITargetLowering::isCheapAddrSpaceCast(unsigned SrcAS,
1264 unsigned DestAS) const {
1263 bool SITargetLowering::isFreeAddrSpaceCast(unsigned SrcAS,
1264 unsigned DestAS) const {
12651265 // Flat -> private/local is a simple truncate.
12661266 // Flat -> global is no-op
12671267 if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
245245 bool isMemOpUniform(const SDNode *N) const;
246246 bool isMemOpHasNoClobberedMemOperand(const SDNode *N) const;
247247 bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
248 bool isCheapAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
248 bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
249249
250250 TargetLoweringBase::LegalizeTypeAction
251251 getPreferredVectorAction(MVT VT) const override;
0 ; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri < %s | FileCheck %s
11
2 ; CHECK: 'addrspacecast_global_to_flat'
2 ; CHECK-LABEL: 'addrspacecast_global_to_flat'
33 ; CHECK: estimated cost of 0 for {{.*}} addrspacecast i8 addrspace(1)* %ptr to i8*
44 define i8* @addrspacecast_global_to_flat(i8 addrspace(1)* %ptr) #0 {
55 %cast = addrspacecast i8 addrspace(1)* %ptr to i8*
66 ret i8* %cast
77 }
88
9 ; CHECK: 'addrspacecast_global_to_flat_v2'
9 ; CHECK-LABEL: 'addrspacecast_global_to_flat_v2'
1010 ; CHECK: estimated cost of 0 for {{.*}} addrspacecast <2 x i8 addrspace(1)*> %ptr to <2 x i8*>
1111 define <2 x i8*> @addrspacecast_global_to_flat_v2(<2 x i8 addrspace(1)*> %ptr) #0 {
1212 %cast = addrspacecast <2 x i8 addrspace(1)*> %ptr to <2 x i8*>
1313 ret <2 x i8*> %cast
1414 }
1515
16 ; CHECK: 'addrspacecast_global_to_flat_v32'
16 ; CHECK-LABEL: 'addrspacecast_global_to_flat_v32'
1717 ; CHECK: estimated cost of 0 for {{.*}} addrspacecast <32 x i8 addrspace(1)*> %ptr to <32 x i8*>
1818 define <32 x i8*> @addrspacecast_global_to_flat_v32(<32 x i8 addrspace(1)*> %ptr) #0 {
1919 %cast = addrspacecast <32 x i8 addrspace(1)*> %ptr to <32 x i8*>
2020 ret <32 x i8*> %cast
2121 }
2222
23 ; CHECK: 'addrspacecast_local_to_flat'
23 ; CHECK-LABEL: 'addrspacecast_local_to_flat'
2424 ; CHECK: estimated cost of 1 for {{.*}} addrspacecast i8 addrspace(3)* %ptr to i8*
2525 define i8* @addrspacecast_local_to_flat(i8 addrspace(3)* %ptr) #0 {
2626 %cast = addrspacecast i8 addrspace(3)* %ptr to i8*
2727 ret i8* %cast
2828 }
2929
30 ; CHECK: 'addrspacecast_local_to_flat_v2'
30 ; CHECK-LABEL: 'addrspacecast_local_to_flat_v2'
3131 ; CHECK: estimated cost of 2 for {{.*}} addrspacecast <2 x i8 addrspace(3)*> %ptr to <2 x i8*>
3232 define <2 x i8*> @addrspacecast_local_to_flat_v2(<2 x i8 addrspace(3)*> %ptr) #0 {
3333 %cast = addrspacecast <2 x i8 addrspace(3)*> %ptr to <2 x i8*>
3434 ret <2 x i8*> %cast
3535 }
3636
37 ; CHECK: 'addrspacecast_local_to_flat_v32'
37 ; CHECK-LABEL: 'addrspacecast_local_to_flat_v32'
3838 ; CHECK: estimated cost of 32 for {{.*}} addrspacecast <32 x i8 addrspace(3)*> %ptr to <32 x i8*>
3939 define <32 x i8*> @addrspacecast_local_to_flat_v32(<32 x i8 addrspace(3)*> %ptr) #0 {
4040 %cast = addrspacecast <32 x i8 addrspace(3)*> %ptr to <32 x i8*>
4141 ret <32 x i8*> %cast
4242 }
4343
44 ; CHECK-LABEL: 'addrspacecast_flat_to_local'
45 ; CHECK: estimated cost of 0 for {{.*}} addrspacecast i8* %ptr to i8 addrspace(3)*
46 define i8 addrspace(3)* @addrspacecast_flat_to_local(i8* %ptr) #0 {
47 %cast = addrspacecast i8* %ptr to i8 addrspace(3)*
48 ret i8 addrspace(3)* %cast
49 }
50
51 ; CHECK-LABEL: 'addrspacecast_flat_to_local_v2'
52 ; CHECK: estimated cost of 0 for {{.*}} addrspacecast <2 x i8*> %ptr to <2 x i8 addrspace(3)*>
53 define <2 x i8 addrspace(3)*> @addrspacecast_flat_to_local_v2(<2 x i8*> %ptr) #0 {
54 %cast = addrspacecast <2 x i8*> %ptr to <2 x i8 addrspace(3)*>
55 ret <2 x i8 addrspace(3)*> %cast
56 }
57
58 ; CHECK-LABEL: 'addrspacecast_flat_to_local_v32'
59 ; CHECK: estimated cost of 0 for {{.*}} addrspacecast <32 x i8*> %ptr to <32 x i8 addrspace(3)*>
60 define <32 x i8 addrspace(3)*> @addrspacecast_flat_to_local_v32(<32 x i8*> %ptr) #0 {
61 %cast = addrspacecast <32 x i8*> %ptr to <32 x i8 addrspace(3)*>
62 ret <32 x i8 addrspace(3)*> %cast
63 }
64
4465 attributes #0 = { nounwind readnone }
0 ; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -mcpu=hawaii -loop-unroll -unroll-threshold=75 -unroll-peel-count=0 -unroll-allow-partial=false -unroll-max-iteration-count-to-analyze=16 < %s | FileCheck %s
1
2 ; CHECK-LABEL: @test_func_addrspacecast_cost_noop(
3 ; CHECK-NOT: br i1
4 define amdgpu_kernel void @test_func_addrspacecast_cost_noop(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture %in) #0 {
5 entry:
6 br label %for.body
7
8 for.body:
9 %indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
10 %sum.02 = phi float [ %fmul, %for.body ], [ 0.0, %entry ]
11 %arrayidx.in = getelementptr inbounds float, float addrspace(1)* %in, i32 %indvars.iv
12 %arrayidx.out = getelementptr inbounds float, float addrspace(1)* %out, i32 %indvars.iv
13 %cast.in = addrspacecast float addrspace(1)* %arrayidx.in to float*
14 %cast.out = addrspacecast float addrspace(1)* %arrayidx.out to float*
15 %load = load float, float* %cast.in
16 %fmul = fmul float %load, %sum.02
17 store float %fmul, float* %cast.out
18 %indvars.iv.next = add i32 %indvars.iv, 1
19 %exitcond = icmp eq i32 %indvars.iv.next, 16
20 br i1 %exitcond, label %for.end, label %for.body
21
22 for.end:
23 ret void
24 }
25
26 ; Free, but not a no-op
27 ; CHECK-LABEL: @test_func_addrspacecast_cost_free(
28 ; CHECK-NOT: br i1
29 define amdgpu_kernel void @test_func_addrspacecast_cost_free(float* noalias nocapture %out, float* noalias nocapture %in) #0 {
30 entry:
31 br label %for.body
32
33 for.body:
34 %indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
35 %sum.02 = phi float [ %fmul, %for.body ], [ 0.0, %entry ]
36 %arrayidx.in = getelementptr inbounds float, float* %in, i32 %indvars.iv
37 %arrayidx.out = getelementptr inbounds float, float* %out, i32 %indvars.iv
38 %cast.in = addrspacecast float* %arrayidx.in to float addrspace(3)*
39 %cast.out = addrspacecast float* %arrayidx.out to float addrspace(3)*
40 %load = load float, float addrspace(3)* %cast.in
41 %fmul = fmul float %load, %sum.02
42 store float %fmul, float addrspace(3)* %cast.out
43 %indvars.iv.next = add i32 %indvars.iv, 1
44 %exitcond = icmp eq i32 %indvars.iv.next, 16
45 br i1 %exitcond, label %for.end, label %for.body
46
47 for.end:
48 ret void
49 }
50
51 ; CHECK-LABEL: @test_func_addrspacecast_cost_nonfree(
52 ; CHECK: br i1 %exitcond
53 define amdgpu_kernel void @test_func_addrspacecast_cost_nonfree(float addrspace(3)* noalias nocapture %out, float addrspace(3)* noalias nocapture %in) #0 {
54 entry:
55 br label %for.body
56
57 for.body:
58 %indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
59 %sum.02 = phi float [ %fmul, %for.body ], [ 0.0, %entry ]
60 %arrayidx.in = getelementptr inbounds float, float addrspace(3)* %in, i32 %indvars.iv
61 %arrayidx.out = getelementptr inbounds float, float addrspace(3)* %out, i32 %indvars.iv
62 %cast.in = addrspacecast float addrspace(3)* %arrayidx.in to float*
63 %cast.out = addrspacecast float addrspace(3)* %arrayidx.out to float*
64 %load = load float, float* %cast.in
65 %fmul = fmul float %load, %sum.02
66 store float %fmul, float* %cast.out
67 %indvars.iv.next = add i32 %indvars.iv, 1
68 %exitcond = icmp eq i32 %indvars.iv.next, 16
69 br i1 %exitcond, label %for.end, label %for.body
70
71 for.end:
72 ret void
73 }
74
75 attributes #0 = { nounwind }
76 attributes #1 = { nounwind readnone speculatable }