# test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir (llvm.org mirror, release_80)

# RUN: llc -march=amdgcn -mcpu=tahiti -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s -check-prefixes=GCN,SI
# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s -check-prefixes=GCN,VI
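
# Exercises GlobalISel instruction selection for 32-bit G_ASHR across
# operand-bank combinations on SI (tahiti) and VI (fiji). Each case label
# below encodes its operands, one letter each: s = SGPR, v = VGPR, i = the
# small constant 1 (legal as an inline immediate), c = the large constant
# 4096 (outside the inline-immediate range, so it needs a 32-bit literal).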

--- |
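  ; Stub IR body: only present so the trailing G_STORE can name %ir.global0
  ; in its memory operand.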
  define void @ashr(i32 addrspace(1)* %global0) { ret void }
...
---

name:            ashr
legalized:       true
regBankSelected: true

# GCN-LABEL: name: ashr
body: |
  bb.0:
    liveins: $sgpr0, $sgpr1, $vgpr0, $vgpr3_vgpr4
    ; GCN: [[SGPR0:%[0-9]+]]:sreg_32 = COPY $sgpr0
    ; GCN: [[SGPR1:%[0-9]+]]:sreg_32 = COPY $sgpr1
    ; GCN: [[VGPR0:%[0-9]+]]:vgpr_32 = COPY $vgpr0
    %0:sgpr(s32) = COPY $sgpr0
    %1:sgpr(s32) = COPY $sgpr1
    %2:vgpr(s32) = COPY $vgpr0
    %3:vgpr(s64) = COPY $vgpr3_vgpr4
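    ; The bank assignments above (sgpr vs. vgpr) decide which opcode each
    ; G_ASHR below selects.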

    ; GCN: [[C1:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1
    ; GCN: [[C4096:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 4096
    %4:sgpr(s32) = G_CONSTANT i32 1
    %5:sgpr(s32) = G_CONSTANT i32 4096
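    ; Both constants are materialized on the scalar bank with S_MOV_B32:
    ; 1 is inline-immediate legal, 4096 is not.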

    ; ashr ss
    ; GCN: [[SS:%[0-9]+]]:sreg_32 = S_ASHR_I32 [[SGPR0]], [[SGPR1]]
    %6:sgpr(s32) = G_ASHR %0, %1

    ; ashr si
    ; GCN: [[SI:%[0-9]+]]:sreg_32 = S_ASHR_I32 [[SS]], [[C1]]
    %7:sgpr(s32) = G_ASHR %6, %4

    ; ashr is
    ; GCN: [[IS:%[0-9]+]]:sreg_32 = S_ASHR_I32 [[C1]], [[SI]]
    %8:sgpr(s32) = G_ASHR %4, %7

    ; ashr sc
    ; GCN: [[SC:%[0-9]+]]:sreg_32 = S_ASHR_I32 [[IS]], [[C4096]]
    %9:sgpr(s32) = G_ASHR %8, %5

    ; ashr cs
    ; GCN: [[CS:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[C4096]], [[SC]]
    %10:sgpr(s32) = G_ASHR %5, %9

    ; ashr vs
    ; GCN: [[VS:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e32 [[CS]], [[VGPR0]]
    %11:vgpr(s32) = G_ASHR %2, %10

    ; ashr sv
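    ; SI still has the non-reversed V_ASHR_I32 (value in src0, shift in src1);
    ; VI removed it, so selection uses V_ASHRREV_I32 with the operands swapped.
    ; The VOP3 _e64 form appears whenever the value operand is an SGPR, since
    ; the VOP2 _e32 encoding requires src1 to be a VGPR.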
    ; SI: [[SV:%[0-9]+]]:vgpr_32 = V_ASHR_I32_e32 [[CS]], [[VS]]
    ; VI: [[SV:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e64 [[VS]], [[CS]]
    %12:vgpr(s32) = G_ASHR %10, %11

    ; ashr vv
    ; SI: [[VV:%[0-9]+]]:vgpr_32 = V_ASHR_I32_e32 [[SV]], [[VGPR0]]
    ; VI: [[VV:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e32 [[VGPR0]], [[SV]]
    %13:vgpr(s32) = G_ASHR %12, %2

    ; ashr iv
    ; SI: [[IV:%[0-9]+]]:vgpr_32 = V_ASHR_I32_e32 [[C1]], [[VV]]
    ; VI: [[IV:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e64 [[VV]], [[C1]]
    %14:vgpr(s32) = G_ASHR %4, %13

    ; ashr vi
    ; GCN: [[VI:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e32 [[C1]], [[IV]]
    %15:vgpr(s32) = G_ASHR %14, %4

    ; ashr cv
    ; SI: [[CV:%[0-9]+]]:vgpr_32 = V_ASHR_I32_e32 [[C4096]], [[VI]]
    ; VI: [[CV:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e64 [[VI]], [[C4096]]
    %16:vgpr(s32) = G_ASHR %5, %15

    ; ashr vc
    ; GCN: [[VC:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e32 [[C4096]], [[CV]]
    %17:vgpr(s32) = G_ASHR %16, %5


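    ; Store the final result so the whole shift chain has a use.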
    G_STORE %17, %3 :: (store 4 into %ir.global0)

...