Tree @release_37
- 32-bit-local-address-space.ll
- add-debug.ll
- add.ll
- add_i64.ll
- address-space.ll
- and.ll
- anyext.ll
- array-ptr-calc-i32.ll
- array-ptr-calc-i64.ll
- atomic_cmp_swap_local.ll
- atomic_load_add.ll
- atomic_load_sub.ll
- basic-branch.ll
- basic-loop.ll
- bfe_uint.ll
- bfi_int.ll
- big_alu.ll
- bitcast.ll
- bswap.ll
- build_vector.ll
- call.ll
- call_fs.ll
- cayman-loop-bug.ll
- cf-stack-bug.ll
- cf_end.ll
- cgp-addressing-modes.ll
- coalescer_remat.ll
- codegen-prepare-addrmode-sext.ll
- combine_vloads.ll
- commute-compares.ll
- commute-shifts.ll
- commute_modifiers.ll
- complex-folding.ll
- concat_vectors.ll
- copy-illegal-type.ll
- copy-to-reg.ll
- ctlz_zero_undef.ll
- ctpop.ll
- ctpop64.ll
- cttz_zero_undef.ll
- cvt_f32_ubyte.ll
- cvt_flr_i32_f32.ll
- cvt_rpi_i32_f32.ll
- dagcombiner-bug-illegal-vec4-int-to-fp.ll
- debug.ll
- default-fp-mode.ll
- disconnected-predset-break-bug.ll
- dot4-folding.ll
- ds-negative-offset-addressing-mode-loop.ll
- ds_read2.ll
- ds_read2_offset_order.ll
- ds_read2_superreg.ll
- ds_read2st64.ll
- ds_write2.ll
- ds_write2st64.ll
- elf.ll
- elf.r600.ll
- empty-function.ll
- endcf-loop-header.ll
- extload-private.ll
- extload.ll
- extract_vector_elt_i16.ll
- fabs.f64.ll
- fabs.ll
- fadd.ll
- fadd64.ll
- fceil.ll
- fceil64.ll
- fcmp-cnd.ll
- fcmp-cnde-int-args.ll
- fcmp.ll
- fcmp64.ll
- fconst64.ll
- fcopysign.f32.ll
- fcopysign.f64.ll
- fdiv.f64.ll
- fdiv.ll
- fetch-limits.r600.ll
- fetch-limits.r700+.ll
- ffloor.f64.ll
- ffloor.ll
- flat-address-space.ll
- floor.ll
- fma-combine.ll
- fma.f64.ll
- fma.ll
- fmad.ll
- fmax.ll
- fmax3.f64.ll
- fmax3.ll
- fmax_legacy.f64.ll
- fmax_legacy.ll
- fmaxnum.f64.ll
- fmaxnum.ll
- fmin.ll
- fmin3.ll
- fmin_legacy.f64.ll
- fmin_legacy.ll
- fminnum.f64.ll
- fminnum.ll
- fmul.ll
- fmul64.ll
- fmuladd.ll
- fnearbyint.ll
- fneg-fabs.f64.ll
- fneg-fabs.ll
- fneg.f64.ll
- fneg.ll
- fp-classify.ll
- fp16_to_fp.ll
- fp32_to_fp16.ll
- fp_to_sint.f64.ll
- fp_to_sint.ll
- fp_to_uint.f64.ll
- fp_to_uint.ll
- fpext.ll
- fptrunc.ll
- frem.ll
- fsqrt.ll
- fsub.ll
- fsub64.ll
- ftrunc.f64.ll
- ftrunc.ll
- gep-address-space.ll
- global-directive.ll
- global-extload-i1.ll
- global-extload-i16.ll
- global-extload-i32.ll
- global-extload-i8.ll
- global-zero-initializer.ll
- global_atomics.ll
- gv-const-addrspace-fail.ll
- gv-const-addrspace.ll
- half.ll
- hsa.ll
- i1-copy-implicit-def.ll
- i1-copy-phi.ll
- i8-to-double-to-float.ll
- icmp-select-sete-reverse-args.ll
- icmp64.ll
- imm.ll
- indirect-addressing-si.ll
- indirect-private-64.ll
- infinite-loop-evergreen.ll
- infinite-loop.ll
- inline-asm.ll
- inline-calls.ll
- input-mods.ll
- insert_subreg.ll
- insert_vector_elt.ll
- invariant-load-no-alias-store.ll
- jump-address.ll
- kcache-fold.ll
- kernel-args.ll
- large-alloca.ll
- large-constant-initializer.ll
- lds-initializer.ll
- lds-oqap-crash.ll
- lds-output-queue.ll
- lds-size.ll
- lds-zero-initializer.ll
- legalizedag-bug-expand-setcc.ll
- lit.local.cfg
- literals.ll
- llvm.AMDGPU.abs.ll
- llvm.AMDGPU.barrier.global.ll
- llvm.AMDGPU.barrier.local.ll
- llvm.AMDGPU.bfe.i32.ll
- llvm.AMDGPU.bfe.u32.ll
- llvm.AMDGPU.bfi.ll
- llvm.AMDGPU.bfm.ll
- llvm.AMDGPU.brev.ll
- llvm.AMDGPU.clamp.ll
- llvm.AMDGPU.class.ll
- llvm.AMDGPU.cube.ll
- llvm.AMDGPU.cvt_f32_ubyte.ll
- llvm.AMDGPU.div_fixup.ll
- llvm.AMDGPU.div_fmas.ll
- llvm.AMDGPU.div_scale.ll
- llvm.amdgpu.dp4.ll
- llvm.AMDGPU.flbit.i32.ll
- llvm.AMDGPU.fract.f64.ll
- llvm.AMDGPU.fract.ll
- llvm.AMDGPU.imad24.ll
- llvm.AMDGPU.imax.ll
- llvm.AMDGPU.imin.ll
- llvm.AMDGPU.imul24.ll
- llvm.AMDGPU.kill.ll
- llvm.amdgpu.kilp.ll
- llvm.AMDGPU.ldexp.ll
- llvm.AMDGPU.legacy.rsq.ll
- llvm.amdgpu.lrp.ll
- llvm.AMDGPU.mul.ll
- llvm.AMDGPU.rcp.f64.ll
- llvm.AMDGPU.rcp.ll
- llvm.AMDGPU.rsq.clamped.f64.ll
- llvm.AMDGPU.rsq.clamped.ll
- llvm.AMDGPU.rsq.ll
- llvm.AMDGPU.tex.ll
- llvm.AMDGPU.trig_preop.ll
- llvm.AMDGPU.trunc.ll
- llvm.AMDGPU.umad24.ll
- llvm.AMDGPU.umax.ll
- llvm.AMDGPU.umin.ll
- llvm.AMDGPU.umul24.ll
- llvm.cos.ll
- llvm.dbg.value.ll
- llvm.exp2.ll
- llvm.log2.ll
- llvm.memcpy.ll
- llvm.pow.ll
- llvm.rint.f64.ll
- llvm.rint.ll
- llvm.round.f64.ll
- llvm.round.ll
- llvm.SI.fs.interp.ll
- llvm.SI.gather4.ll
- llvm.SI.getlod.ll
- llvm.SI.image.ll
- llvm.SI.image.sample.ll
- llvm.SI.image.sample.o.ll
- llvm.SI.imageload.ll
- llvm.SI.load.dword.ll
- llvm.SI.resinfo.ll
- llvm.SI.sample-masked.ll
- llvm.SI.sample.ll
- llvm.SI.sampled.ll
- llvm.SI.sendmsg-m0.ll
- llvm.SI.sendmsg.ll
- llvm.SI.tbuffer.store.ll
- llvm.SI.tid.ll
- llvm.sin.ll
- llvm.sqrt.ll
- load-i1.ll
- load-input-fold.ll
- load.ll
- load.vec.ll
- load64.ll
- local-64.ll
- local-atomics.ll
- local-atomics64.ll
- local-memory-two-objects.ll
- local-memory.ll
- loop-address.ll
- loop-idiom.ll
- lshl.ll
- lshr.ll
- m0-spill.ll
- mad-combine.ll
- mad-sub.ll
- mad_int24.ll
- mad_uint24.ll
- madak.ll
- madmk.ll
- max-literals.ll
- max.ll
- max3.ll
- merge-stores.ll
- min.ll
- min3.ll
- missing-store.ll
- mubuf.ll
- mul.ll
- mul_int24.ll
- mul_uint24.ll
- mulhu.ll
- no-initializer-constant-addrspace.ll
- no-shrink-extloads.ll
- operand-folding.ll
- operand-spacing.ll
- or.ll
- packetizer.ll
- parallelandifcollapse.ll
- parallelorifcollapse.ll
- predicate-dp4.ll
- predicates.ll
- private-memory-atomics.ll
- private-memory-broken.ll
- private-memory.ll
- promote-alloca-bitcast-function.ll
- promote-alloca-stored-pointer-value.ll
- pv-packing.ll
- pv.ll
- r600-encoding.ll
- r600-export-fix.ll
- r600-infinite-loop-bug-while-reorganizing-vector.ll
- r600cfg.ll
- README
- reciprocal.ll
- register-count-comments.ll
- reorder-stores.ll
- rotl.i64.ll
- rotl.ll
- rotr.i64.ll
- rotr.ll
- rsq.ll
- rv7x0_count3.ll
- s_movk_i32.ll
- saddo.ll
- salu-to-valu.ll
- scalar_to_vector.ll
- schedule-fs-loop-nested-if.ll
- schedule-fs-loop-nested.ll
- schedule-fs-loop.ll
- schedule-global-loads.ll
- schedule-if-2.ll
- schedule-if.ll
- schedule-kernel-arg-loads.ll
- schedule-vs-if-nested-loop-failure.ll
- schedule-vs-if-nested-loop.ll
- scratch-buffer.ll
- sdiv.ll
- sdivrem24.ll
- sdivrem64.ll
- select-i1.ll
- select-vectors.ll
- select.ll
- select64.ll
- selectcc-cnd.ll
- selectcc-cnde-int.ll
- selectcc-icmp-select-float.ll
- selectcc-opt.ll
- selectcc.ll
- set-dx10.ll
- setcc-equivalent.ll
- setcc-opt.ll
- setcc.ll
- setcc64.ll
- seto.ll
- setuo.ll
- sext-eliminate.ll
- sext-in-reg.ll
- sgpr-control-flow.ll
- sgpr-copy-duplicate-operand.ll
- sgpr-copy.ll
- shared-op-cycle.ll
- shl.ll
- shl_add_constant.ll
- shl_add_ptr.ll
- si-annotate-cf-assertion.ll
- si-annotate-cf.ll
- si-lod-bias.ll
- si-sgpr-spill.ll
- si-spill-cf.ll
- si-triv-disjoint-mem-access.ll
- si-vector-hang.ll
- sign_extend.ll
- simplify-demanded-bits-build-pair.ll
- sint_to_fp.f64.ll
- sint_to_fp.ll
- smrd.ll
- split-scalar-i64-add.ll
- sra.ll
- srem.ll
- srl.ll
- ssubo.ll
- store-barrier.ll
- store-v3i32.ll
- store-v3i64.ll
- store-vector-ptrs.ll
- store.ll
- store.r600.ll
- structurize.ll
- structurize1.ll
- sub.ll
- subreg-coalescer-crash.ll
- subreg-coalescer-undef-use.ll
- subreg-eliminate-dead.ll
- swizzle-export.ll
- tex-clause-antidep.ll
- texture-input-merge.ll
- trunc-cmp-constant.ll
- trunc-store-f64-to-f16.ll
- trunc-store-i1.ll
- trunc-store.ll
- trunc-vector-store-assertion-failure.ll
- trunc.ll
- tti-unroll-prefs.ll
- uaddo.ll
- udiv.ll
- udivrem.ll
- udivrem24.ll
- udivrem64.ll
- uint_to_fp.f64.ll
- uint_to_fp.ll
- unaligned-load-store.ll
- unhandled-loop-condition-assertion.ll
- unroll.ll
- unsupported-cc.ll
- urecip.ll
- urem.ll
- use-sgpr-multiple-times.ll
- usubo.ll
- v1i64-kernel-arg.ll
- v_cndmask.ll
- v_mac.ll
- valu-i1.ll
- vector-alloca.ll
- vertex-fetch-encoding.ll
- vop-shrink.ll
- vselect.ll
- vselect64.ll
- vtx-fetch-branch.ll
- vtx-schedule.ll
- wait.ll
- work-item-intrinsics.ll
- wrong-transalu-pos-fix.ll
- xor.ll
- zero_extend.ll
xor.ll @release_37
; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
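
; The SI prefix is shared by the verde and tonga runs above; the EG prefix
; checks the r600/Evergreen lowering.

; A <2 x i32> xor on values loaded from global memory should split into one
; 32-bit xor per lane on both targets.
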
; FUNC-LABEL: {{^}}xor_v2i32:
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; SI: v_xor_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_xor_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @xor_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in0, <2 x i32> addrspace(1)* %in1) {
  %a = load <2 x i32>, <2 x i32> addrspace(1)* %in0
  %b = load <2 x i32>, <2 x i32> addrspace(1)* %in1
  %result = xor <2 x i32> %a, %b
  store <2 x i32> %result, <2 x i32> addrspace(1)* %out
  ret void
}
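
; Same as xor_v2i32, widened to <4 x i32>: four per-lane xors.
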
; FUNC-LABEL: {{^}}xor_v4i32:
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
define void @xor_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in0, <4 x i32> addrspace(1)* %in1) {
  %a = load <4 x i32>, <4 x i32> addrspace(1)* %in0
  %b = load <4 x i32>, <4 x i32> addrspace(1)* %in1
  %result = xor <4 x i32> %a, %b
  store <4 x i32> %result, <4 x i32> addrspace(1)* %out
  ret void
}
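
; xor of two i1 compare results: the compare conditions live in scalar lane
; masks, so the xor itself is expected to be an s_xor_b64 and the select to
; lower to v_cndmask_b32.
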
; FUNC-LABEL: {{^}}xor_i1:
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], PS}}
; SI-DAG: v_cmp_le_f32_e32 [[CMP0:vcc]], 0, {{v[0-9]+}}
; SI-DAG: v_cmp_le_f32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], 1.0, {{v[0-9]+}}
; SI: s_xor_b64 [[XOR:vcc]], [[CMP0]], [[CMP1]]
; SI: v_cndmask_b32_e32 [[RESULT:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @xor_i1(float addrspace(1)* %out, float addrspace(1)* %in0, float addrspace(1)* %in1) {
  %a = load float, float addrspace(1)* %in0
  %b = load float, float addrspace(1)* %in1
  %acmp = fcmp oge float %a, 0.000000e+00
  %bcmp = fcmp oge float %b, 1.000000e+00
  %xor = xor i1 %acmp, %bcmp
  %result = select i1 %xor, float %a, float %b
  store float %result, float addrspace(1)* %out
  ret void
}
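
; i1 values in memory are stored as bytes, so the inputs are byte loads into
; VGPRs; the xor is a 32-bit VALU op whose result is masked back down to a
; single bit before the store.
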
; FUNC-LABEL: {{^}}v_xor_i1:
; SI: buffer_load_ubyte [[B:v[0-9]+]]
; SI: buffer_load_ubyte [[A:v[0-9]+]]
; SI: v_xor_b32_e32 [[XOR:v[0-9]+]], [[A]], [[B]]
; SI: v_and_b32_e32 [[RESULT:v[0-9]+]], 1, [[XOR]]
; SI: buffer_store_byte [[RESULT]]
define void @v_xor_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in0, i1 addrspace(1)* %in1) {
  %a = load i1, i1 addrspace(1)* %in0
  %b = load i1, i1 addrspace(1)* %in1
  %xor = xor i1 %a, %b
  store i1 %xor, i1 addrspace(1)* %out
  ret void
}
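
; Operands loaded from memory live in VGPRs, so the VALU form of xor is
; expected here.
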
; FUNC-LABEL: {{^}}vector_xor_i32:
; SI: v_xor_b32_e32
define void @vector_xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) {
  %a = load i32, i32 addrspace(1)* %in0
  %b = load i32, i32 addrspace(1)* %in1
  %result = xor i32 %a, %b
  store i32 %result, i32 addrspace(1)* %out
  ret void
}
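
; Operands passed as kernel arguments are uniform and arrive in SGPRs, so
; the xor can stay on the SALU.
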
; FUNC-LABEL: {{^}}scalar_xor_i32:
; SI: s_xor_b32
define void @scalar_xor_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
  %result = xor i32 %a, %b
  store i32 %result, i32 addrspace(1)* %out
  ret void
}
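
; xor with -1 is a bitwise not and should fold to a single s_not_b32.
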
; FUNC-LABEL: {{^}}scalar_not_i32:
; SI: s_not_b32
define void @scalar_not_i32(i32 addrspace(1)* %out, i32 %a) {
  %result = xor i32 %a, -1
  store i32 %result, i32 addrspace(1)* %out
  ret void
}
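
; The same fold applies on the VALU when the operand is loaded from memory.
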
; FUNC-LABEL: {{^}}vector_not_i32:
; SI: v_not_b32
define void @vector_not_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) {
  %a = load i32, i32 addrspace(1)* %in0
  %b = load i32, i32 addrspace(1)* %in1
  %result = xor i32 %a, -1
  store i32 %result, i32 addrspace(1)* %out
  ret void
}
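
; A 64-bit xor on the VALU is split into two 32-bit halves.
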
; FUNC-LABEL: {{^}}vector_xor_i64:
; SI: v_xor_b32_e32
; SI: v_xor_b32_e32
; SI: s_endpgm
define void @vector_xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64 addrspace(1)* %in1) {
  %a = load i64, i64 addrspace(1)* %in0
  %b = load i64, i64 addrspace(1)* %in1
  %result = xor i64 %a, %b
  store i64 %result, i64 addrspace(1)* %out
  ret void
}
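
; The SALU handles a full 64-bit xor in one instruction.
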
; FUNC-LABEL: {{^}}scalar_xor_i64:
; SI: s_xor_b64
; SI: s_endpgm
define void @scalar_xor_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
  %result = xor i64 %a, %b
  store i64 %result, i64 addrspace(1)* %out
  ret void
}
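
; Likewise, a 64-bit scalar not is a single s_not_b64.
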
; FUNC-LABEL: {{^}}scalar_not_i64:
; SI: s_not_b64
define void @scalar_not_i64(i64 addrspace(1)* %out, i64 %a) {
  %result = xor i64 %a, -1
  store i64 %result, i64 addrspace(1)* %out
  ret void
}
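
; A 64-bit not on the VALU splits into two 32-bit v_not_b32 ops.
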
; FUNC-LABEL: {{^}}vector_not_i64:
; SI: v_not_b32
; SI: v_not_b32
define void @vector_not_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64 addrspace(1)* %in1) {
  %a = load i64, i64 addrspace(1)* %in0
  %b = load i64, i64 addrspace(1)* %in1
  %result = xor i64 %a, -1
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

; Test that we have a pattern to match xor inside a branch. The operands
; here are uniform kernel arguments, so the SALU form (s_xor_b64) is
; already selected.

; FUNC-LABEL: {{^}}xor_cf:
; SI: s_xor_b64
define void @xor_cf(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b) {
entry:
  %0 = icmp eq i64 %a, 0
  br i1 %0, label %if, label %else

if:
  %1 = xor i64 %a, %b
  br label %endif

else:
  %2 = load i64, i64 addrspace(1)* %in
  br label %endif

endif:
  %3 = phi i64 [%1, %if], [%2, %else]
  store i64 %3, i64 addrspace(1)* %out
  ret void
}