llvm.org GIT mirror llvm / f27f35e
[X86] Add test cases showing failure to fold load into vpsrlw when EVEX encoded instructions are used. There's a bad bitcast being used in the isel patterns for the vXi16 shift instructions. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@344562 91177308-0d34-0410-b5e6-96231b3b80d8 Craig Topper 1 year, 1 month ago
3 changed file(s) with 115 addition(s) and 27 deletion(s). Raw diff Collapse all Expand all
530530 declare <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16>, <8 x i16>) nounwind readnone
531531
532532
; New test added by this commit: the <8 x i16> shift amount is loaded from
; memory. In the AVX check lines the load folds into vpsrlw's memory operand;
; in the AVX512VL check lines it does NOT fold (separate vmovdqa + reg-reg
; vpsrlw) -- the missed fold this commit's tests are meant to demonstrate
; (commit message: "bad bitcast ... in the isel patterns" -- see later fix).
533 define <16 x i16> @test_x86_avx2_psrl_w_load(<16 x i16> %a0, <8 x i16>* %p) {
534 ; X86-AVX-LABEL: test_x86_avx2_psrl_w_load:
535 ; X86-AVX: ## %bb.0:
536 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
537 ; X86-AVX-NEXT: vpsrlw (%eax), %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd1,0x00]
538 ; X86-AVX-NEXT: retl ## encoding: [0xc3]
539 ;
540 ; X86-AVX512VL-LABEL: test_x86_avx2_psrl_w_load:
541 ; X86-AVX512VL: ## %bb.0:
542 ; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
543 ; X86-AVX512VL-NEXT: vmovdqa (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x08]
544 ; X86-AVX512VL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd1,0xc1]
545 ; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
546 ;
547 ; X64-AVX-LABEL: test_x86_avx2_psrl_w_load:
548 ; X64-AVX: ## %bb.0:
549 ; X64-AVX-NEXT: vpsrlw (%rdi), %ymm0, %ymm0 ## encoding: [0xc5,0xfd,0xd1,0x07]
550 ; X64-AVX-NEXT: retq ## encoding: [0xc3]
551 ;
552 ; X64-AVX512VL-LABEL: test_x86_avx2_psrl_w_load:
553 ; X64-AVX512VL: ## %bb.0:
554 ; X64-AVX512VL-NEXT: vmovdqa (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x0f]
555 ; X64-AVX512VL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd1,0xc1]
556 ; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
557 %a1 = load <8 x i16>, <8 x i16>* %p
558 %res = call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> %a0, <8 x i16> %a1) ; <<16 x i16>> [#uses=1]
559 ret <16 x i16> %res
560 }
561
562
533563 define <8 x i32> @test_x86_avx2_psrli_d(<8 x i32> %a0) {
534564 ; AVX2-LABEL: test_x86_avx2_psrli_d:
535565 ; AVX2: ## %bb.0:
819849 ; X86-AVX: ## %bb.0:
820850 ; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
821851 ; X86-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
822 ; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI50_0, kind: FK_Data_4
852 ; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI51_0, kind: FK_Data_4
823853 ; X86-AVX-NEXT: retl ## encoding: [0xc3]
824854 ;
825855 ; X86-AVX512VL-LABEL: test_x86_avx2_packusdw_fold:
826856 ; X86-AVX512VL: ## %bb.0:
827 ; X86-AVX512VL-NEXT: vmovaps LCPI50_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
857 ; X86-AVX512VL-NEXT: vmovaps LCPI51_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
828858 ; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
829 ; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI50_0, kind: FK_Data_4
859 ; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI51_0, kind: FK_Data_4
830860 ; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
831861 ;
832862 ; X64-AVX-LABEL: test_x86_avx2_packusdw_fold:
833863 ; X64-AVX: ## %bb.0:
834864 ; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
835865 ; X64-AVX-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
836 ; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI50_0-4, kind: reloc_riprel_4byte
866 ; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI51_0-4, kind: reloc_riprel_4byte
837867 ; X64-AVX-NEXT: retq ## encoding: [0xc3]
838868 ;
839869 ; X64-AVX512VL-LABEL: test_x86_avx2_packusdw_fold:
840870 ; X64-AVX512VL: ## %bb.0:
841871 ; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
842872 ; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
843 ; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI50_0-4, kind: reloc_riprel_4byte
873 ; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI51_0-4, kind: reloc_riprel_4byte
844874 ; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
845875 %res = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> zeroinitializer, <8 x i32> <i32 255, i32 32767, i32 65535, i32 -1, i32 -32767, i32 -127, i32 -32768, i32 0>) ; constant operand restored -- the scraper ate the <i32 ...> literal as an HTML tag; values reconstructed from the expected result [0,0,0,0,255,32767,65535,0,...] in the check lines above
846876 ret <16 x i16> %res
13471377 ; X86-AVX: ## %bb.0:
13481378 ; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,4294967284,23]
13491379 ; X86-AVX-NEXT: ## encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
1350 ; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI82_0, kind: FK_Data_4
1351 ; X86-AVX-NEXT: vpsravd LCPI82_1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
1352 ; X86-AVX-NEXT: ## fixup A - offset: 5, value: LCPI82_1, kind: FK_Data_4
1380 ; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI83_0, kind: FK_Data_4
1381 ; X86-AVX-NEXT: vpsravd LCPI83_1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
1382 ; X86-AVX-NEXT: ## fixup A - offset: 5, value: LCPI83_1, kind: FK_Data_4
13531383 ; X86-AVX-NEXT: retl ## encoding: [0xc3]
13541384 ;
13551385 ; X86-AVX512VL-LABEL: test_x86_avx2_psrav_d_const:
13561386 ; X86-AVX512VL: ## %bb.0:
1357 ; X86-AVX512VL-NEXT: vmovdqa LCPI82_0, %xmm0 ## EVEX TO VEX Compression xmm0 = [2,9,4294967284,23]
1387 ; X86-AVX512VL-NEXT: vmovdqa LCPI83_0, %xmm0 ## EVEX TO VEX Compression xmm0 = [2,9,4294967284,23]
13581388 ; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
1359 ; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI82_0, kind: FK_Data_4
1360 ; X86-AVX512VL-NEXT: vpsravd LCPI82_1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
1361 ; X86-AVX512VL-NEXT: ## fixup A - offset: 5, value: LCPI82_1, kind: FK_Data_4
1389 ; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI83_0, kind: FK_Data_4
1390 ; X86-AVX512VL-NEXT: vpsravd LCPI83_1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
1391 ; X86-AVX512VL-NEXT: ## fixup A - offset: 5, value: LCPI83_1, kind: FK_Data_4
13621392 ; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
13631393 ;
13641394 ; X64-AVX-LABEL: test_x86_avx2_psrav_d_const:
13651395 ; X64-AVX: ## %bb.0:
13661396 ; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,4294967284,23]
13671397 ; X64-AVX-NEXT: ## encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
1368 ; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI82_0-4, kind: reloc_riprel_4byte
1398 ; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI83_0-4, kind: reloc_riprel_4byte
13691399 ; X64-AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
1370 ; X64-AVX-NEXT: ## fixup A - offset: 5, value: LCPI82_1-4, kind: reloc_riprel_4byte
1400 ; X64-AVX-NEXT: ## fixup A - offset: 5, value: LCPI83_1-4, kind: reloc_riprel_4byte
13711401 ; X64-AVX-NEXT: retq ## encoding: [0xc3]
13721402 ;
13731403 ; X64-AVX512VL-LABEL: test_x86_avx2_psrav_d_const:
13741404 ; X64-AVX512VL: ## %bb.0:
13751405 ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm0 ## EVEX TO VEX Compression xmm0 = [2,9,4294967284,23]
13761406 ; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
1377 ; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI82_0-4, kind: reloc_riprel_4byte
1407 ; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI83_0-4, kind: reloc_riprel_4byte
13781408 ; X64-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
1379 ; X64-AVX512VL-NEXT: ## fixup A - offset: 5, value: LCPI82_1-4, kind: reloc_riprel_4byte
1409 ; X64-AVX512VL-NEXT: ## fixup A - offset: 5, value: LCPI83_1-4, kind: reloc_riprel_4byte
13801410 ; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
13811411 %res = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> <i32 2, i32 9, i32 -12, i32 23>, <4 x i32> <i32 1, i32 18, i32 35, i32 52>) ; constant operands restored -- scraper ate the <i32 ...> literals; first vector reconstructed from the vmovdqa comment [2,9,4294967284,23] above, shift amounts from the upstream test
13821412 ret <4 x i32> %res
14021432 ; X86-AVX: ## %bb.0:
14031433 ; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
14041434 ; X86-AVX-NEXT: ## encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
1405 ; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI84_0, kind: FK_Data_4
1406 ; X86-AVX-NEXT: vpsravd LCPI84_1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
1407 ; X86-AVX-NEXT: ## fixup A - offset: 5, value: LCPI84_1, kind: FK_Data_4
1435 ; X86-AVX-NEXT: ## fixup A - offset: 4, value: LCPI85_0, kind: FK_Data_4
1436 ; X86-AVX-NEXT: vpsravd LCPI85_1, %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
1437 ; X86-AVX-NEXT: ## fixup A - offset: 5, value: LCPI85_1, kind: FK_Data_4
14081438 ; X86-AVX-NEXT: retl ## encoding: [0xc3]
14091439 ;
14101440 ; X86-AVX512VL-LABEL: test_x86_avx2_psrav_d_256_const:
14111441 ; X86-AVX512VL: ## %bb.0:
1412 ; X86-AVX512VL-NEXT: vmovdqa LCPI84_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
1442 ; X86-AVX512VL-NEXT: vmovdqa LCPI85_0, %ymm0 ## EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
14131443 ; X86-AVX512VL-NEXT: ## encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
1414 ; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI84_0, kind: FK_Data_4
1415 ; X86-AVX512VL-NEXT: vpsravd LCPI84_1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
1416 ; X86-AVX512VL-NEXT: ## fixup A - offset: 5, value: LCPI84_1, kind: FK_Data_4
1444 ; X86-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI85_0, kind: FK_Data_4
1445 ; X86-AVX512VL-NEXT: vpsravd LCPI85_1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
1446 ; X86-AVX512VL-NEXT: ## fixup A - offset: 5, value: LCPI85_1, kind: FK_Data_4
14171447 ; X86-AVX512VL-NEXT: retl ## encoding: [0xc3]
14181448 ;
14191449 ; X64-AVX-LABEL: test_x86_avx2_psrav_d_256_const:
14201450 ; X64-AVX: ## %bb.0:
14211451 ; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
14221452 ; X64-AVX-NEXT: ## encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
1423 ; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI84_0-4, kind: reloc_riprel_4byte
1453 ; X64-AVX-NEXT: ## fixup A - offset: 4, value: LCPI85_0-4, kind: reloc_riprel_4byte
14241454 ; X64-AVX-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
1425 ; X64-AVX-NEXT: ## fixup A - offset: 5, value: LCPI84_1-4, kind: reloc_riprel_4byte
1455 ; X64-AVX-NEXT: ## fixup A - offset: 5, value: LCPI85_1-4, kind: reloc_riprel_4byte
14261456 ; X64-AVX-NEXT: retq ## encoding: [0xc3]
14271457 ;
14281458 ; X64-AVX512VL-LABEL: test_x86_avx2_psrav_d_256_const:
14291459 ; X64-AVX512VL: ## %bb.0:
14301460 ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm0 ## EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
14311461 ; X64-AVX512VL-NEXT: ## encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
1432 ; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI84_0-4, kind: reloc_riprel_4byte
1462 ; X64-AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI85_0-4, kind: reloc_riprel_4byte
14331463 ; X64-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
1434 ; X64-AVX512VL-NEXT: ## fixup A - offset: 5, value: LCPI84_1-4, kind: reloc_riprel_4byte
1464 ; X64-AVX512VL-NEXT: ## fixup A - offset: 5, value: LCPI85_1-4, kind: reloc_riprel_4byte
14351465 ; X64-AVX512VL-NEXT: retq ## encoding: [0xc3]
14371467 %res = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> <i32 2, i32 9, i32 -12, i32 23, i32 -26, i32 37, i32 -40, i32 51>, <8 x i32> <i32 1, i32 18, i32 35, i32 52, i32 69, i32 86, i32 103, i32 120>) ; constant operands restored -- scraper ate the <i32 ...> literals; first vector reconstructed from the vmovdqa comment [2,9,4294967284,23,4294967270,37,4294967256,51] above, shift amounts from the upstream test
14371467 ret <8 x i32> %res
19471947 }
19481948 declare <32 x i16> @llvm.x86.avx512.psrl.w.512(<32 x i16>, <8 x i16>) nounwind readnone
19491949
; New test added by this commit: 512-bit variant. Both X86 and X64 check
; lines show the shift-amount load kept as a separate vmovdqa rather than
; folded into the EVEX-encoded vpsrlw memory form -- the missed fold these
; tests capture (no VEX fallback exists for zmm, so both targets miss it).
1950 define <32 x i16> @test_x86_avx512_psrl_w_512_load(<32 x i16> %a0, <8 x i16>* %p) {
1951 ; X86-LABEL: test_x86_avx512_psrl_w_512_load:
1952 ; X86: # %bb.0:
1953 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
1954 ; X86-NEXT: vmovdqa (%eax), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x08]
1955 ; X86-NEXT: vpsrlw %xmm1, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xd1,0xc1]
1956 ; X86-NEXT: retl # encoding: [0xc3]
1957 ;
1958 ; X64-LABEL: test_x86_avx512_psrl_w_512_load:
1959 ; X64: # %bb.0:
1960 ; X64-NEXT: vmovdqa (%rdi), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x0f]
1961 ; X64-NEXT: vpsrlw %xmm1, %zmm0, %zmm0 # encoding: [0x62,0xf1,0x7d,0x48,0xd1,0xc1]
1962 ; X64-NEXT: retq # encoding: [0xc3]
1963 %a1 = load <8 x i16>, <8 x i16>* %p
1964 %res = call <32 x i16> @llvm.x86.avx512.psrl.w.512(<32 x i16> %a0, <8 x i16> %a1) ; <<32 x i16>> [#uses=1]
1965 ret <32 x i16> %res
1966 }
19501967
19511968 define <32 x i16> @test_x86_avx512_psrli_w_512(<32 x i16> %a0) {
19521969 ; CHECK-LABEL: test_x86_avx512_psrli_w_512:
14171417 declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) nounwind readnone
14181418
14191419
; New test added by this commit: 128-bit SSE2 intrinsic variant. SSE and
; AVX1 check lines fold the shift-amount load into psrlw/vpsrlw's memory
; operand; the AVX512 check lines keep a separate vmovdqa + reg-reg vpsrlw,
; showing the same missed fold when EVEX-capable patterns are selected.
1420 define <8 x i16> @test_x86_sse2_psrl_w_load(<8 x i16> %a0, <8 x i16>* %p) {
1421 ; X86-SSE-LABEL: test_x86_sse2_psrl_w_load:
1422 ; X86-SSE: ## %bb.0:
1423 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
1424 ; X86-SSE-NEXT: psrlw (%eax), %xmm0 ## encoding: [0x66,0x0f,0xd1,0x00]
1425 ; X86-SSE-NEXT: retl ## encoding: [0xc3]
1426 ;
1427 ; X86-AVX1-LABEL: test_x86_sse2_psrl_w_load:
1428 ; X86-AVX1: ## %bb.0:
1429 ; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
1430 ; X86-AVX1-NEXT: vpsrlw (%eax), %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd1,0x00]
1431 ; X86-AVX1-NEXT: retl ## encoding: [0xc3]
1432 ;
1433 ; X86-AVX512-LABEL: test_x86_sse2_psrl_w_load:
1434 ; X86-AVX512: ## %bb.0:
1435 ; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
1436 ; X86-AVX512-NEXT: vmovdqa (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x08]
1437 ; X86-AVX512-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd1,0xc1]
1438 ; X86-AVX512-NEXT: retl ## encoding: [0xc3]
1439 ;
1440 ; X64-SSE-LABEL: test_x86_sse2_psrl_w_load:
1441 ; X64-SSE: ## %bb.0:
1442 ; X64-SSE-NEXT: psrlw (%rdi), %xmm0 ## encoding: [0x66,0x0f,0xd1,0x07]
1443 ; X64-SSE-NEXT: retq ## encoding: [0xc3]
1444 ;
1445 ; X64-AVX1-LABEL: test_x86_sse2_psrl_w_load:
1446 ; X64-AVX1: ## %bb.0:
1447 ; X64-AVX1-NEXT: vpsrlw (%rdi), %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd1,0x07]
1448 ; X64-AVX1-NEXT: retq ## encoding: [0xc3]
1449 ;
1450 ; X64-AVX512-LABEL: test_x86_sse2_psrl_w_load:
1451 ; X64-AVX512: ## %bb.0:
1452 ; X64-AVX512-NEXT: vmovdqa (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x0f]
1453 ; X64-AVX512-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd1,0xc1]
1454 ; X64-AVX512-NEXT: retq ## encoding: [0xc3]
1455 %a1 = load <8 x i16>, <8 x i16>* %p
1456 %res = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
1457 ret <8 x i16> %res
1458 }
1459
1460
14201461 define <4 x i32> @test_x86_sse2_psrli_d(<4 x i32> %a0) {
14211462 ; SSE-LABEL: test_x86_sse2_psrli_d:
14221463 ; SSE: ## %bb.0: