; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512BW
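; x86 has no vector i8 multiply, so the <16 x i8> tests below are widened:
; the bytes are sign-extended to i16, multiplied with pmullw, then the low
; bytes are masked off and packed back together. AVX512BW instead keeps the
; product in one wide register and truncates it with vpmovwb.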
define <16 x i8> @mul_v16i8c(<16 x i8> %i) nounwind {
; SSE2-LABEL: mul_v16i8c:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
; SSE2-NEXT:    psraw $8, %xmm1
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    psraw $8, %xmm2
; SSE2-NEXT:    pmullw %xmm1, %xmm2
; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT:    pand %xmm3, %xmm2
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm0
; SSE2-NEXT:    pmullw %xmm1, %xmm0
; SSE2-NEXT:    pand %xmm3, %xmm0
; SSE2-NEXT:    packuswb %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: mul_v16i8c:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    pmovsxbw %xmm0, %xmm1
; SSE41-NEXT:    pmovsxbw {{.*}}(%rip), %xmm2
; SSE41-NEXT:    pmullw %xmm2, %xmm1
; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT:    pand %xmm3, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT:    pmovsxbw %xmm0, %xmm0
; SSE41-NEXT:    pmullw %xmm2, %xmm0
; SSE41-NEXT:    pand %xmm3, %xmm0
; SSE41-NEXT:    packuswb %xmm0, %xmm1
; SSE41-NEXT:    movdqa %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX2-LABEL: mul_v16i8c:
; AVX2:       # BB#0: # %entry
; AVX2-NEXT:    vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT:    vpmovsxbw {{.*}}(%rip), %ymm1
; AVX2-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: mul_v16i8c:
; AVX512F:       # BB#0: # %entry
; AVX512F-NEXT:    vpmovsxbw %xmm0, %ymm0
; AVX512F-NEXT:    vpmovsxbw {{.*}}(%rip), %ymm1
; AVX512F-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
; AVX512F-NEXT:    retq
;
; AVX512BW-LABEL: mul_v16i8c:
; AVX512BW:       # BB#0: # %entry
; AVX512BW-NEXT:    vpmovsxbw %xmm0, %ymm0
; AVX512BW-NEXT:    vpmovsxbw {{.*}}(%rip), %ymm1
; AVX512BW-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX512BW-NEXT:    retq
entry:
  %A = mul <16 x i8> %i, < i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117 >
  ret <16 x i8> %A
}

define <8 x i16> @mul_v8i16c(<8 x i16> %i) nounwind {
; SSE-LABEL: mul_v8i16c:
; SSE:       # BB#0: # %entry
; SSE-NEXT:    pmullw {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: mul_v8i16c:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %A = mul <8 x i16> %i, < i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117 >
  ret <8 x i16> %A
}
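; <8 x i16> multiplies map straight onto pmullw. For i32, SSE2 has only the
; widening pmuludq, so mul_v4i32c multiplies the even and odd elements
; separately and shuffles the halves back together; SSE4.1 uses pmulld and
; AVX broadcasts the splat constant first. The i64 case is built from 32-bit
; halves: a*117 = lo(a)*117 + ((hi(a)*117) << 32) mod 2^64, and because the
; constant has a zero high half only two pmuludq ops are needed.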
define <4 x i32> @mul_v4i32c(<4 x i32> %i) nounwind {
; SSE2-LABEL: mul_v4i32c:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [117,117,117,117]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: mul_v4i32c:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: mul_v4i32c:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm1
; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %A = mul <4 x i32> %i, < i32 117, i32 117, i32 117, i32 117 >
  ret <4 x i32> %A
}

define <2 x i64> @mul_v2i64c(<2 x i64> %i) nounwind {
; SSE-LABEL: mul_v2i64c:
; SSE:       # BB#0: # %entry
; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [117,117]
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    pmuludq %xmm1, %xmm2
; SSE-NEXT:    psrlq $32, %xmm0
; SSE-NEXT:    pmuludq %xmm1, %xmm0
; SSE-NEXT:    psllq $32, %xmm0
; SSE-NEXT:    paddq %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: mul_v2i64c:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [117,117]
; AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
; AVX-NEXT:    vpsrlq $32, %xmm0, %xmm0
; AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpsllq $32, %xmm0, %xmm0
; AVX-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
; AVX-NEXT:    retq
entry:
  %A = mul <2 x i64> %i, < i64 117, i64 117 >
  ret <2 x i64> %A
}

define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind {
; SSE2-LABEL: mul_v16i8:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    movdqa %xmm1, %xmm2
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    psraw $8, %xmm2
; SSE2-NEXT:    movdqa %xmm0, %xmm3
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    psraw $8, %xmm3
; SSE2-NEXT:    pmullw %xmm2, %xmm3
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT:    pand %xmm2, %xmm3
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm1
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm0
; SSE2-NEXT:    pmullw %xmm1, %xmm0
; SSE2-NEXT:    pand %xmm2, %xmm0
; SSE2-NEXT:    packuswb %xmm3, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: mul_v16i8:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    pmovsxbw %xmm1, %xmm3
; SSE41-NEXT:    pmovsxbw %xmm0, %xmm2
; SSE41-NEXT:    pmullw %xmm3, %xmm2
; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT:    pand %xmm3, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; SSE41-NEXT:    pmovsxbw %xmm1, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT:    pmovsxbw %xmm0, %xmm0
; SSE41-NEXT:    pmullw %xmm1, %xmm0
; SSE41-NEXT:    pand %xmm3, %xmm0
; SSE41-NEXT:    packuswb %xmm0, %xmm2
; SSE41-NEXT:    movdqa %xmm2, %xmm0
; SSE41-NEXT:    retq
;
; AVX2-LABEL: mul_v16i8:
; AVX2:       # BB#0: # %entry
; AVX2-NEXT:    vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT:    vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: mul_v16i8:
; AVX512F:       # BB#0: # %entry
; AVX512F-NEXT:    vpmovsxbw %xmm1, %ymm1
; AVX512F-NEXT:    vpmovsxbw %xmm0, %ymm0
; AVX512F-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
; AVX512F-NEXT:    retq
;
; AVX512BW-LABEL: mul_v16i8:
; AVX512BW:       # BB#0: # %entry
; AVX512BW-NEXT:    vpmovsxbw %xmm1, %ymm1
; AVX512BW-NEXT:    vpmovsxbw %xmm0, %ymm0
; AVX512BW-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX512BW-NEXT:    retq
entry:
  %A = mul <16 x i8> %i, %j
  ret <16 x i8> %A
}

define <8 x i16> @mul_v8i16(<8 x i16> %i, <8 x i16> %j) nounwind {
; SSE-LABEL: mul_v8i16:
; SSE:       # BB#0: # %entry
; SSE-NEXT:    pmullw %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: mul_v8i16:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %A = mul <8 x i16> %i, %j
  ret <8 x i16> %A
}

define <4 x i32> @mul_v4i32(<4 x i32> %i, <4 x i32> %j) nounwind {
; SSE2-LABEL: mul_v4i32:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: mul_v4i32:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    pmulld %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: mul_v4i32:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %A = mul <4 x i32> %i, %j
  ret <4 x i32> %A
}

define <2 x i64> @mul_v2i64(<2 x i64> %i, <2 x i64> %j) nounwind {
; SSE-LABEL: mul_v2i64:
; SSE:       # BB#0: # %entry
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    pmuludq %xmm1, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psrlq $32, %xmm3
; SSE-NEXT:    pmuludq %xmm0, %xmm3
; SSE-NEXT:    psllq $32, %xmm3
; SSE-NEXT:    paddq %xmm3, %xmm2
; SSE-NEXT:    psrlq $32, %xmm0
; SSE-NEXT:    pmuludq %xmm1, %xmm0
; SSE-NEXT:    psllq $32, %xmm0
; SSE-NEXT:    paddq %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: mul_v2i64:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
; AVX-NEXT:    vpsrlq $32, %xmm1, %xmm3
; AVX-NEXT:    vpmuludq %xmm3, %xmm0, %xmm3
; AVX-NEXT:    vpsllq $32, %xmm3, %xmm3
; AVX-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
; AVX-NEXT:    vpsrlq $32, %xmm0, %xmm0
; AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpsllq $32, %xmm0, %xmm0
; AVX-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
; AVX-NEXT:    retq
entry:
  %A = mul <2 x i64> %i, %j
  ret <2 x i64> %A
}

declare void @foo()
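; The spill tests make a call so that both multiply operands have to be
; spilled and reloaded around it; with SSE4.1 and AVX the reload folds into
; pmulld as a memory operand.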
define <4 x i32> @mul_v4i32spill(<4 x i32> %i, <4 x i32> %j) nounwind {
; SSE2-LABEL: mul_v4i32spill:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    subq $40, %rsp
; SSE2-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; SSE2-NEXT:    callq foo
; SSE2-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE2-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
; SSE2-NEXT:    pmuludq %xmm2, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    addq $40, %rsp
; SSE2-NEXT:    retq
;
; SSE41-LABEL: mul_v4i32spill:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    subq $40, %rsp
; SSE41-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; SSE41-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; SSE41-NEXT:    callq foo
; SSE41-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE41-NEXT:    pmulld {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
; SSE41-NEXT:    addq $40, %rsp
; SSE41-NEXT:    retq
;
; AVX-LABEL: mul_v4i32spill:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    subq $40, %rsp
; AVX-NEXT:    vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT:    callq foo
; AVX-NEXT:    vmovdqa (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT:    vpmulld {{[0-9]+}}(%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT:    addq $40, %rsp
; AVX-NEXT:    retq
entry:
; Use a call to force spills.
  call void @foo()
  %A = mul <4 x i32> %i, %j
  ret <4 x i32> %A
}

define <2 x i64> @mul_v2i64spill(<2 x i64> %i, <2 x i64> %j) nounwind {
; SSE-LABEL: mul_v2i64spill:
; SSE:       # BB#0: # %entry
; SSE-NEXT:    subq $40, %rsp
; SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; SSE-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT:    callq foo
; SSE-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
; SSE-NEXT:    pmuludq %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm3, %xmm1
; SSE-NEXT:    psrlq $32, %xmm1
; SSE-NEXT:    pmuludq %xmm0, %xmm1
; SSE-NEXT:    psllq $32, %xmm1
; SSE-NEXT:    paddq %xmm1, %xmm2
; SSE-NEXT:    psrlq $32, %xmm0
; SSE-NEXT:    pmuludq %xmm3, %xmm0
; SSE-NEXT:    psllq $32, %xmm0
; SSE-NEXT:    paddq %xmm2, %xmm0
; SSE-NEXT:    addq $40, %rsp
; SSE-NEXT:    retq
;
; AVX-LABEL: mul_v2i64spill:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    subq $40, %rsp
; AVX-NEXT:    vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT:    callq foo
; AVX-NEXT:    vmovdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
; AVX-NEXT:    vmovdqa (%rsp), %xmm3 # 16-byte Reload
; AVX-NEXT:    vpmuludq %xmm2, %xmm3, %xmm0
; AVX-NEXT:    vpsrlq $32, %xmm2, %xmm1
; AVX-NEXT:    vpmuludq %xmm1, %xmm3, %xmm1
; AVX-NEXT:    vpsllq $32, %xmm1, %xmm1
; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpsrlq $32, %xmm3, %xmm1
; AVX-NEXT:    vpmuludq %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpsllq $32, %xmm1, %xmm1
; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    addq $40, %rsp
; AVX-NEXT:    retq
entry:
; Use a call to force spills.
  call void @foo()
  %A = mul <2 x i64> %i, %j
  ret <2 x i64> %A
}
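; 256-bit versions of the splat tests above: plain SSE repeats the 128-bit
; sequence on each xmm half, while AVX2 and AVX512 operate on whole ymm
; registers.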
define <32 x i8> @mul_v32i8c(<32 x i8> %i) nounwind {
; SSE2-LABEL: mul_v32i8c:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
; SSE2-NEXT:    psraw $8, %xmm2
; SSE2-NEXT:    movdqa %xmm0, %xmm3
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    psraw $8, %xmm3
; SSE2-NEXT:    pmullw %xmm2, %xmm3
; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT:    pand %xmm4, %xmm3
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm0
; SSE2-NEXT:    pmullw %xmm2, %xmm0
; SSE2-NEXT:    pand %xmm4, %xmm0
; SSE2-NEXT:    packuswb %xmm3, %xmm0
; SSE2-NEXT:    movdqa %xmm1, %xmm3
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    psraw $8, %xmm3
; SSE2-NEXT:    pmullw %xmm2, %xmm3
; SSE2-NEXT:    pand %xmm4, %xmm3
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm1
; SSE2-NEXT:    pmullw %xmm2, %xmm1
; SSE2-NEXT:    pand %xmm4, %xmm1
; SSE2-NEXT:    packuswb %xmm3, %xmm1
; SSE2-NEXT:    retq
;
; SSE41-LABEL: mul_v32i8c:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    pmovsxbw %xmm0, %xmm2
; SSE41-NEXT:    pmovsxbw {{.*}}(%rip), %xmm4
; SSE41-NEXT:    pmullw %xmm4, %xmm2
; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT:    pand %xmm5, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT:    pmovsxbw %xmm0, %xmm0
; SSE41-NEXT:    pmullw %xmm4, %xmm0
; SSE41-NEXT:    pand %xmm5, %xmm0
; SSE41-NEXT:    packuswb %xmm0, %xmm2
; SSE41-NEXT:    pmovsxbw %xmm1, %xmm3
; SSE41-NEXT:    pmullw %xmm4, %xmm3
; SSE41-NEXT:    pand %xmm5, %xmm3
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE41-NEXT:    pmovsxbw %xmm0, %xmm0
; SSE41-NEXT:    pmullw %xmm4, %xmm0
; SSE41-NEXT:    pand %xmm5, %xmm0
; SSE41-NEXT:    packuswb %xmm0, %xmm3
; SSE41-NEXT:    movdqa %xmm2, %xmm0
; SSE41-NEXT:    movdqa %xmm3, %xmm1
; SSE41-NEXT:    retq
;
; AVX2-LABEL: mul_v32i8c:
; AVX2:       # BB#0: # %entry
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT:    vpmovsxbw {{.*}}(%rip), %ymm2
; AVX2-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm3
; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT:    vpshufb %xmm4, %xmm3, %xmm3
; AVX2-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; AVX2-NEXT:    vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
; AVX2-NEXT:    vpshufb %xmm4, %xmm2, %xmm2
; AVX2-NEXT:    vpshufb %xmm4, %xmm0, %xmm0
; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: mul_v32i8c:
; AVX512F:       # BB#0: # %entry
; AVX512F-NEXT:    vpmovsxbw %xmm0, %ymm1
; AVX512F-NEXT:    vpmovsxbw {{.*}}(%rip), %ymm2
; AVX512F-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
; AVX512F-NEXT:    vpmovsxwd %ymm1, %zmm1
; AVX512F-NEXT:    vpmovdb %zmm1, %xmm1
; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm0
; AVX512F-NEXT:    vpmovsxbw %xmm0, %ymm0
; AVX512F-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
; AVX512F-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX512F-NEXT:    retq
;
; AVX512BW-LABEL: mul_v32i8c:
; AVX512BW:       # BB#0: # %entry
; AVX512BW-NEXT:    vmovaps {{.*#+}} ymm1 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
; AVX512BW-NEXT:    vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT:    vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT:    vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT:    retq
entry:
  %A = mul <32 x i8> %i, < i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117 >
  ret <32 x i8> %A
}
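; 16-bit elements have a native multiply at every width, so mul_v16i16c is
; just one or two pmullw ops; the i32 and i64 splat cases repeat the 128-bit
; lowerings per xmm half (SSE) or once per ymm (AVX).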
define <16 x i16> @mul_v16i16c(<16 x i16> %i) nounwind {
; SSE-LABEL: mul_v16i16c:
; SSE:       # BB#0: # %entry
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117]
; SSE-NEXT:    pmullw %xmm2, %xmm0
; SSE-NEXT:    pmullw %xmm2, %xmm1
; SSE-NEXT:    retq
;
; AVX-LABEL: mul_v16i16c:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT:    retq
entry:
  %A = mul <16 x i16> %i, < i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117 >
  ret <16 x i16> %A
}

define <8 x i32> @mul_v8i32c(<8 x i32> %i) nounwind {
; SSE2-LABEL: mul_v8i32c:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [117,117,117,117]
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: mul_v8i32c:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [117,117,117,117]
; SSE41-NEXT:    pmulld %xmm2, %xmm0
; SSE41-NEXT:    pmulld %xmm2, %xmm1
; SSE41-NEXT:    retq
;
; AVX-LABEL: mul_v8i32c:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm1
; AVX-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
; AVX-NEXT:    retq
entry:
  %A = mul <8 x i32> %i, < i32 117, i32 117, i32 117, i32 117, i32 117, i32 117, i32 117, i32 117 >
  ret <8 x i32> %A
}

define <4 x i64> @mul_v4i64c(<4 x i64> %i) nounwind {
; SSE-LABEL: mul_v4i64c:
; SSE:       # BB#0: # %entry
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [117,117]
; SSE-NEXT:    movdqa %xmm0, %xmm3
; SSE-NEXT:    pmuludq %xmm2, %xmm3
; SSE-NEXT:    psrlq $32, %xmm0
; SSE-NEXT:    pmuludq %xmm2, %xmm0
; SSE-NEXT:    psllq $32, %xmm0
; SSE-NEXT:    paddq %xmm3, %xmm0
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    pmuludq %xmm2, %xmm3
; SSE-NEXT:    psrlq $32, %xmm1
; SSE-NEXT:    pmuludq %xmm2, %xmm1
; SSE-NEXT:    psllq $32, %xmm1
; SSE-NEXT:    paddq %xmm3, %xmm1
; SSE-NEXT:    retq
;
; AVX-LABEL: mul_v4i64c:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm1
; AVX-NEXT:    vpmuludq %ymm1, %ymm0, %ymm2
; AVX-NEXT:    vpsrlq $32, %ymm0, %ymm0
; AVX-NEXT:    vpmuludq %ymm1, %ymm0, %ymm0
; AVX-NEXT:    vpsllq $32, %ymm0, %ymm0
; AVX-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
; AVX-NEXT:    retq
entry:
  %A = mul <4 x i64> %i, < i64 117, i64 117, i64 117, i64 117 >
  ret <4 x i64> %A
}
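; Variable <32 x i8> multiply: the same sign-extend/pmullw/repack sequence as
; the splat case, except both operands now have to be widened.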
define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind {
; SSE2-LABEL: mul_v32i8:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    movdqa %xmm2, %xmm4
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    psraw $8, %xmm4
; SSE2-NEXT:    movdqa %xmm0, %xmm5
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    psraw $8, %xmm5
; SSE2-NEXT:    pmullw %xmm4, %xmm5
; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT:    pand %xmm4, %xmm5
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm2
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm0
; SSE2-NEXT:    pmullw %xmm2, %xmm0
; SSE2-NEXT:    pand %xmm4, %xmm0
; SSE2-NEXT:    packuswb %xmm5, %xmm0
; SSE2-NEXT:    movdqa %xmm3, %xmm2
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    psraw $8, %xmm2
; SSE2-NEXT:    movdqa %xmm1, %xmm5
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    psraw $8, %xmm5
; SSE2-NEXT:    pmullw %xmm2, %xmm5
; SSE2-NEXT:    pand %xmm4, %xmm5
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm3
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm1
; SSE2-NEXT:    pmullw %xmm3, %xmm1
; SSE2-NEXT:    pand %xmm4, %xmm1
; SSE2-NEXT:    packuswb %xmm5, %xmm1
; SSE2-NEXT:    retq
;
; SSE41-LABEL: mul_v32i8:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    pmovsxbw %xmm2, %xmm5
; SSE41-NEXT:    pmovsxbw %xmm0, %xmm4
; SSE41-NEXT:    pmullw %xmm5, %xmm4
; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT:    pand %xmm5, %xmm4
; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; SSE41-NEXT:    pmovsxbw %xmm2, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT:    pmovsxbw %xmm0, %xmm0
; SSE41-NEXT:    pmullw %xmm2, %xmm0
; SSE41-NEXT:    pand %xmm5, %xmm0
; SSE41-NEXT:    packuswb %xmm0, %xmm4
; SSE41-NEXT:    pmovsxbw %xmm3, %xmm0
; SSE41-NEXT:    pmovsxbw %xmm1, %xmm2
; SSE41-NEXT:    pmullw %xmm0, %xmm2
; SSE41-NEXT:    pand %xmm5, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1]
; SSE41-NEXT:    pmovsxbw %xmm0, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; SSE41-NEXT:    pmovsxbw %xmm1, %xmm1
; SSE41-NEXT:    pmullw %xmm0, %xmm1
; SSE41-NEXT:    pand %xmm5, %xmm1
; SSE41-NEXT:    packuswb %xmm1, %xmm2
; SSE41-NEXT:    movdqa %xmm4, %xmm0
; SSE41-NEXT:    movdqa %xmm2, %xmm1
; SSE41-NEXT:    retq
;
; AVX2-LABEL: mul_v32i8:
; AVX2:       # BB#0: # %entry
; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT:    vpmovsxbw %xmm2, %ymm2
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm3
; AVX2-NEXT:    vpmovsxbw %xmm3, %ymm3
; AVX2-NEXT:    vpmullw %ymm2, %ymm3, %ymm2
; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm3
; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT:    vpshufb %xmm4, %xmm3, %xmm3
; AVX2-NEXT:    vpshufb %xmm4, %xmm2, %xmm2
; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; AVX2-NEXT:    vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT:    vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
; AVX2-NEXT:    vpshufb %xmm4, %xmm0, %xmm0
; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: mul_v32i8:
; AVX512F:       # BB#0: # %entry
; AVX512F-NEXT:    vpmovsxbw %xmm1, %ymm2
; AVX512F-NEXT:    vpmovsxbw %xmm0, %ymm3
; AVX512F-NEXT:    vpmullw %ymm2, %ymm3, %ymm2
; AVX512F-NEXT:    vpmovsxwd %ymm2, %zmm2
; AVX512F-NEXT:    vpmovdb %zmm2, %xmm2
; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm1
; AVX512F-NEXT:    vpmovsxbw %xmm1, %ymm1
; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm0
; AVX512F-NEXT:    vpmovsxbw %xmm0, %ymm0
; AVX512F-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
; AVX512F-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX512F-NEXT:    retq
;
; AVX512BW-LABEL: mul_v32i8:
; AVX512BW:       # BB#0: # %entry
; AVX512BW-NEXT:    vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT:    vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT:    vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT:    retq
entry:
  %A = mul <32 x i8> %i, %j
  ret <32 x i8> %A
}
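; mul_v16i16 and mul_v8i32 lower directly to pmullw/pmulld. A variable i64
; multiply needs all three 32x32 partial products that land in the low 64
; bits: a*b = lo(a)*lo(b) + ((lo(a)*hi(b) + hi(a)*lo(b)) << 32) mod 2^64.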
define <16 x i16> @mul_v16i16(<16 x i16> %i, <16 x i16> %j) nounwind {
; SSE-LABEL: mul_v16i16:
; SSE:       # BB#0: # %entry
; SSE-NEXT:    pmullw %xmm2, %xmm0
; SSE-NEXT:    pmullw %xmm3, %xmm1
; SSE-NEXT:    retq
;
; AVX-LABEL: mul_v16i16:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
; AVX-NEXT:    retq
entry:
  %A = mul <16 x i16> %i, %j
  ret <16 x i16> %A
}

define <8 x i32> @mul_v8i32(<8 x i32> %i, <8 x i32> %j) nounwind {
; SSE2-LABEL: mul_v8i32:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm4, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm3, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: mul_v8i32:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    pmulld %xmm2, %xmm0
; SSE41-NEXT:    pmulld %xmm3, %xmm1
; SSE41-NEXT:    retq
;
; AVX-LABEL: mul_v8i32:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
; AVX-NEXT:    retq
entry:
  %A = mul <8 x i32> %i, %j
  ret <8 x i32> %A
}

define <4 x i64> @mul_v4i64(<4 x i64> %i, <4 x i64> %j) nounwind {
; SSE-LABEL: mul_v4i64:
; SSE:       # BB#0: # %entry
; SSE-NEXT:    movdqa %xmm0, %xmm4
; SSE-NEXT:    pmuludq %xmm2, %xmm4
; SSE-NEXT:    movdqa %xmm2, %xmm5
; SSE-NEXT:    psrlq $32, %xmm5
; SSE-NEXT:    pmuludq %xmm0, %xmm5
; SSE-NEXT:    psllq $32, %xmm5
; SSE-NEXT:    paddq %xmm5, %xmm4
; SSE-NEXT:    psrlq $32, %xmm0
; SSE-NEXT:    pmuludq %xmm2, %xmm0
; SSE-NEXT:    psllq $32, %xmm0
; SSE-NEXT:    paddq %xmm4, %xmm0
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    pmuludq %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm3, %xmm4
; SSE-NEXT:    psrlq $32, %xmm4
; SSE-NEXT:    pmuludq %xmm1, %xmm4
; SSE-NEXT:    psllq $32, %xmm4
; SSE-NEXT:    paddq %xmm4, %xmm2
; SSE-NEXT:    psrlq $32, %xmm1
; SSE-NEXT:    pmuludq %xmm3, %xmm1
; SSE-NEXT:    psllq $32, %xmm1
; SSE-NEXT:    paddq %xmm2, %xmm1
; SSE-NEXT:    retq
;
; AVX-LABEL: mul_v4i64:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpmuludq %ymm1, %ymm0, %ymm2
; AVX-NEXT:    vpsrlq $32, %ymm1, %ymm3
; AVX-NEXT:    vpmuludq %ymm3, %ymm0, %ymm3
; AVX-NEXT:    vpsllq $32, %ymm3, %ymm3
; AVX-NEXT:    vpaddq %ymm3, %ymm2, %ymm2
; AVX-NEXT:    vpsrlq $32, %ymm0, %ymm0
; AVX-NEXT:    vpmuludq %ymm1, %ymm0, %ymm0
; AVX-NEXT:    vpsllq $32, %ymm0, %ymm0
; AVX-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
; AVX-NEXT:    retq
entry:
  %A = mul <4 x i64> %i, %j
  ret <4 x i64> %A
}
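; 512-bit tests. Without AVX512BW the <64 x i8> multiply is carved into ymm
; (AVX2/AVX512F) or xmm (SSE) pieces; AVX512BW sign-extends each ymm half to
; a zmm of i16, multiplies with one vpmullw, and narrows again with vpmovwb.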
define <64 x i8> @mul_v64i8c(<64 x i8> %i) nounwind {
; SSE2-LABEL: mul_v64i8c:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
; SSE2-NEXT:    psraw $8, %xmm4
; SSE2-NEXT:    movdqa %xmm0, %xmm6
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    psraw $8, %xmm6
; SSE2-NEXT:    pmullw %xmm4, %xmm6
; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT:    pand %xmm5, %xmm6
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm0
; SSE2-NEXT:    pmullw %xmm4, %xmm0
; SSE2-NEXT:    pand %xmm5, %xmm0
; SSE2-NEXT:    packuswb %xmm6, %xmm0
; SSE2-NEXT:    movdqa %xmm1, %xmm6
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    psraw $8, %xmm6
; SSE2-NEXT:    pmullw %xmm4, %xmm6
; SSE2-NEXT:    pand %xmm5, %xmm6
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm1
; SSE2-NEXT:    pmullw %xmm4, %xmm1
; SSE2-NEXT:    pand %xmm5, %xmm1
; SSE2-NEXT:    packuswb %xmm6, %xmm1
; SSE2-NEXT:    movdqa %xmm2, %xmm6
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    psraw $8, %xmm6
; SSE2-NEXT:    pmullw %xmm4, %xmm6
; SSE2-NEXT:    pand %xmm5, %xmm6
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm2
; SSE2-NEXT:    pmullw %xmm4, %xmm2
; SSE2-NEXT:    pand %xmm5, %xmm2
; SSE2-NEXT:    packuswb %xmm6, %xmm2
; SSE2-NEXT:    movdqa %xmm3, %xmm6
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    psraw $8, %xmm6
; SSE2-NEXT:    pmullw %xmm4, %xmm6
; SSE2-NEXT:    pand %xmm5, %xmm6
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm3
; SSE2-NEXT:    pmullw %xmm4, %xmm3
; SSE2-NEXT:    pand %xmm5, %xmm3
; SSE2-NEXT:    packuswb %xmm6, %xmm3
; SSE2-NEXT:    retq
;
; SSE41-LABEL: mul_v64i8c:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    movdqa %xmm1, %xmm4
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    pmovsxbw %xmm1, %xmm0
; SSE41-NEXT:    pmovsxbw {{.*}}(%rip), %xmm6
; SSE41-NEXT:    pmullw %xmm6, %xmm0
; SSE41-NEXT:    movdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT:    pand %xmm7, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; SSE41-NEXT:    pmovsxbw %xmm1, %xmm1
; SSE41-NEXT:    pmullw %xmm6, %xmm1
; SSE41-NEXT:    pand %xmm7, %xmm1
; SSE41-NEXT:    packuswb %xmm1, %xmm0
; SSE41-NEXT:    pmovsxbw %xmm4, %xmm1
; SSE41-NEXT:    pmullw %xmm6, %xmm1
; SSE41-NEXT:    pand %xmm7, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
; SSE41-NEXT:    pmovsxbw %xmm4, %xmm4
; SSE41-NEXT:    pmullw %xmm6, %xmm4
; SSE41-NEXT:    pand %xmm7, %xmm4
; SSE41-NEXT:    packuswb %xmm4, %xmm1
; SSE41-NEXT:    pmovsxbw %xmm2, %xmm4
; SSE41-NEXT:    pmullw %xmm6, %xmm4
; SSE41-NEXT:    pand %xmm7, %xmm4
; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; SSE41-NEXT:    pmovsxbw %xmm2, %xmm2
; SSE41-NEXT:    pmullw %xmm6, %xmm2
; SSE41-NEXT:    pand %xmm7, %xmm2
; SSE41-NEXT:    packuswb %xmm2, %xmm4
; SSE41-NEXT:    pmovsxbw %xmm3, %xmm5
; SSE41-NEXT:    pmullw %xmm6, %xmm5
; SSE41-NEXT:    pand %xmm7, %xmm5
; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
; SSE41-NEXT:    pmovsxbw %xmm2, %xmm2
; SSE41-NEXT:    pmullw %xmm6, %xmm2
; SSE41-NEXT:    pand %xmm7, %xmm2
; SSE41-NEXT:    packuswb %xmm2, %xmm5
; SSE41-NEXT:    movdqa %xmm4, %xmm2
; SSE41-NEXT:    movdqa %xmm5, %xmm3
; SSE41-NEXT:    retq
;
; AVX2-LABEL: mul_v64i8c:
; AVX2:       # BB#0: # %entry
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
; AVX2-NEXT:    vpmovsxbw %xmm2, %ymm2
; AVX2-NEXT:    vpmovsxbw {{.*}}(%rip), %ymm3
; AVX2-NEXT:    vpmullw %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm4
; AVX2-NEXT:    vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT:    vpshufb %xmm5, %xmm4, %xmm4
; AVX2-NEXT:    vpshufb %xmm5, %xmm2, %xmm2
; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
; AVX2-NEXT:    vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT:    vpmullw %ymm3, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm4
; AVX2-NEXT:    vpshufb %xmm5, %xmm4, %xmm4
; AVX2-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT:    vpmovsxbw %xmm2, %ymm2
; AVX2-NEXT:    vpmullw %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm4
; AVX2-NEXT:    vpshufb %xmm5, %xmm4, %xmm4
; AVX2-NEXT:    vpshufb %xmm5, %xmm2, %xmm2
; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
; AVX2-NEXT:    vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT:    vpmullw %ymm3, %ymm1, %ymm1
; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm3
; AVX2-NEXT:    vpshufb %xmm5, %xmm3, %xmm3
; AVX2-NEXT:    vpshufb %xmm5, %xmm1, %xmm1
; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: mul_v64i8c:
; AVX512F:       # BB#0: # %entry
; AVX512F-NEXT:    vpmovsxbw %xmm0, %ymm2
; AVX512F-NEXT:    vpmovsxbw {{.*}}(%rip), %ymm3
; AVX512F-NEXT:    vpmullw %ymm3, %ymm2, %ymm2
; AVX512F-NEXT:    vpmovsxwd %ymm2, %zmm2
; AVX512F-NEXT:    vpmovdb %zmm2, %xmm2
; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm0
; AVX512F-NEXT:    vpmovsxbw %xmm0, %ymm0
; AVX512F-NEXT:    vpmullw %ymm3, %ymm0, %ymm0
; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
; AVX512F-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX512F-NEXT:    vpmovsxbw %xmm1, %ymm2
; AVX512F-NEXT:    vpmullw %ymm3, %ymm2, %ymm2
; AVX512F-NEXT:    vpmovsxwd %ymm2, %zmm2
; AVX512F-NEXT:    vpmovdb %zmm2, %xmm2
; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm1
; AVX512F-NEXT:    vpmovsxbw %xmm1, %ymm1
; AVX512F-NEXT:    vpmullw %ymm3, %ymm1, %ymm1
; AVX512F-NEXT:    vpmovsxwd %ymm1, %zmm1
; AVX512F-NEXT:    vpmovdb %zmm1, %xmm1
; AVX512F-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX512F-NEXT:    retq
;
; AVX512BW-LABEL: mul_v64i8c:
; AVX512BW:       # BB#0: # %entry
; AVX512BW-NEXT:    vmovaps {{.*#+}} ymm1 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]
; AVX512BW-NEXT:    vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT:    vpmovsxbw %ymm0, %zmm2
; AVX512BW-NEXT:    vpmullw %zmm1, %zmm2, %zmm2
; AVX512BW-NEXT:    vpmovwb %zmm2, %ymm2
; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
; AVX512BW-NEXT:    vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT:    vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
; AVX512BW-NEXT:    retq
entry:
  %A = mul <64 x i8> %i, < i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117 >
  ret <64 x i8> %A
}

define <64 x i8> @mul_v64i8(<64 x i8> %i, <64 x i8> %j) nounwind {
; SSE2-LABEL: mul_v64i8:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    movdqa %xmm4, %xmm8
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm8 = xmm8[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    psraw $8, %xmm8
; SSE2-NEXT:    movdqa %xmm0, %xmm9
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    psraw $8, %xmm9
; SSE2-NEXT:    pmullw %xmm8, %xmm9
; SSE2-NEXT:    movdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT:    pand %xmm8, %xmm9
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm4
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm0
; SSE2-NEXT:    pmullw %xmm4, %xmm0
; SSE2-NEXT:    pand %xmm8, %xmm0
; SSE2-NEXT:    packuswb %xmm9, %xmm0
; SSE2-NEXT:    movdqa %xmm5, %xmm9
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    psraw $8, %xmm9
; SSE2-NEXT:    movdqa %xmm1, %xmm4
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    psraw $8, %xmm4
; SSE2-NEXT:    pmullw %xmm9, %xmm4
; SSE2-NEXT:    pand %xmm8, %xmm4
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm5
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm1
; SSE2-NEXT:    pmullw %xmm5, %xmm1
; SSE2-NEXT:    pand %xmm8, %xmm1
; SSE2-NEXT:    packuswb %xmm4, %xmm1
; SSE2-NEXT:    movdqa %xmm6, %xmm4
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    psraw $8, %xmm4
; SSE2-NEXT:    movdqa %xmm2, %xmm5
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    psraw $8, %xmm5
; SSE2-NEXT:    pmullw %xmm4, %xmm5
; SSE2-NEXT:    pand %xmm8, %xmm5
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm6
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm2
; SSE2-NEXT:    pmullw %xmm6, %xmm2
; SSE2-NEXT:    pand %xmm8, %xmm2
; SSE2-NEXT:    packuswb %xmm5, %xmm2
; SSE2-NEXT:    movdqa %xmm7, %xmm4
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    psraw $8, %xmm4
; SSE2-NEXT:    movdqa %xmm3, %xmm5
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT:    psraw $8, %xmm5
; SSE2-NEXT:    pmullw %xmm4, %xmm5
; SSE2-NEXT:    pand %xmm8, %xmm5
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm7 = xmm7[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm7
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm3
; SSE2-NEXT:    pmullw %xmm7, %xmm3
; SSE2-NEXT:    pand %xmm8, %xmm3
; SSE2-NEXT:    packuswb %xmm5, %xmm3
; SSE2-NEXT:    retq
;
; SSE41-LABEL: mul_v64i8:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    movdqa %xmm1, %xmm8
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    pmovsxbw %xmm4, %xmm9
; SSE41-NEXT:    pmovsxbw %xmm1, %xmm0
; SSE41-NEXT:    pmullw %xmm9, %xmm0
; SSE41-NEXT:    movdqa {{.*#+}} xmm9 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT:    pand %xmm9, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
; SSE41-NEXT:    pmovsxbw %xmm4, %xmm4
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; SSE41-NEXT:    pmovsxbw %xmm1, %xmm1
; SSE41-NEXT:    pmullw %xmm4, %xmm1
; SSE41-NEXT:    pand %xmm9, %xmm1
; SSE41-NEXT:    packuswb %xmm1, %xmm0
; SSE41-NEXT:    pmovsxbw %xmm5, %xmm4
; SSE41-NEXT:    pmovsxbw %xmm8, %xmm1
; SSE41-NEXT:    pmullw %xmm4, %xmm1
; SSE41-NEXT:    pand %xmm9, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[2,3,0,1]
; SSE41-NEXT:    pmovsxbw %xmm4, %xmm4
; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm8[2,3,0,1]
; SSE41-NEXT:    pmovsxbw %xmm5, %xmm5
; SSE41-NEXT:    pmullw %xmm4, %xmm5
; SSE41-NEXT:    pand %xmm9, %xmm5
; SSE41-NEXT:    packuswb %xmm5, %xmm1
; SSE41-NEXT:    pmovsxbw %xmm6, %xmm5
; SSE41-NEXT:    pmovsxbw %xmm2, %xmm4
; SSE41-NEXT:    pmullw %xmm5, %xmm4
; SSE41-NEXT:    pand %xmm9, %xmm4
; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm6[2,3,0,1]
; SSE41-NEXT:    pmovsxbw %xmm5, %xmm5
; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; SSE41-NEXT:    pmovsxbw %xmm2, %xmm2
; SSE41-NEXT:    pmullw %xmm5, %xmm2
; SSE41-NEXT:    pand %xmm9, %xmm2
; SSE41-NEXT:    packuswb %xmm2, %xmm4
; SSE41-NEXT:    pmovsxbw %xmm7, %xmm2
; SSE41-NEXT:    pmovsxbw %xmm3, %xmm5
; SSE41-NEXT:    pmullw %xmm2, %xmm5
; SSE41-NEXT:    pand %xmm9, %xmm5
; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm7[2,3,0,1]
; SSE41-NEXT:    pmovsxbw %xmm2, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
; SSE41-NEXT:    pmovsxbw %xmm3, %xmm3
; SSE41-NEXT:    pmullw %xmm2, %xmm3
; SSE41-NEXT:    pand %xmm9, %xmm3
; SSE41-NEXT:    packuswb %xmm3, %xmm5
; SSE41-NEXT:    movdqa %xmm4, %xmm2
; SSE41-NEXT:    movdqa %xmm5, %xmm3
; SSE41-NEXT:    retq
;
; AVX2-LABEL: mul_v64i8:
; AVX2:       # BB#0: # %entry
; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm4
; AVX2-NEXT:    vpmovsxbw %xmm4, %ymm4
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm5
; AVX2-NEXT:    vpmovsxbw %xmm5, %ymm5
; AVX2-NEXT:    vpmullw %ymm4, %ymm5, %ymm5
; AVX2-NEXT:    vextracti128 $1, %ymm5, %xmm6
; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT:    vpshufb %xmm4, %xmm6, %xmm6
; AVX2-NEXT:    vpshufb %xmm4, %xmm5, %xmm5
; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm6[0]
; AVX2-NEXT:    vpmovsxbw %xmm2, %ymm2
; AVX2-NEXT:    vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
; AVX2-NEXT:    vpshufb %xmm4, %xmm2, %xmm2
; AVX2-NEXT:    vpshufb %xmm4, %xmm0, %xmm0
; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; AVX2-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm3, %xmm2
; AVX2-NEXT:    vpmovsxbw %xmm2, %ymm2
; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm5
; AVX2-NEXT:    vpmovsxbw %xmm5, %ymm5
; AVX2-NEXT:    vpmullw %ymm2, %ymm5, %ymm2
; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm5
; AVX2-NEXT:    vpshufb %xmm4, %xmm5, %xmm5
; AVX2-NEXT:    vpshufb %xmm4, %xmm2, %xmm2
; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
; AVX2-NEXT:    vpmovsxbw %xmm3, %ymm3
; AVX2-NEXT:    vpmovsxbw %xmm1, %ymm1
; AVX2-NEXT:    vpmullw %ymm3, %ymm1, %ymm1
; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm3
; AVX2-NEXT:    vpshufb %xmm4, %xmm3, %xmm3
; AVX2-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: mul_v64i8:
; AVX512F:       # BB#0: # %entry
; AVX512F-NEXT:    vpmovsxbw %xmm2, %ymm4
; AVX512F-NEXT:    vpmovsxbw %xmm0, %ymm5
; AVX512F-NEXT:    vpmullw %ymm4, %ymm5, %ymm4
; AVX512F-NEXT:    vpmovsxwd %ymm4, %zmm4
; AVX512F-NEXT:    vpmovdb %zmm4, %xmm4
; AVX512F-NEXT:    vextracti128 $1, %ymm2, %xmm2
; AVX512F-NEXT:    vpmovsxbw %xmm2, %ymm2
; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm0
; AVX512F-NEXT:    vpmovsxbw %xmm0, %ymm0
; AVX512F-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
; AVX512F-NEXT:    vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
; AVX512F-NEXT:    vinsertf128 $1, %xmm0, %ymm4, %ymm0
; AVX512F-NEXT:    vpmovsxbw %xmm3, %ymm2
; AVX512F-NEXT:    vpmovsxbw %xmm1, %ymm4
; AVX512F-NEXT:    vpmullw %ymm2, %ymm4, %ymm2
; AVX512F-NEXT:    vpmovsxwd %ymm2, %zmm2
; AVX512F-NEXT:    vpmovdb %zmm2, %xmm2
; AVX512F-NEXT:    vextracti128 $1, %ymm3, %xmm3
; AVX512F-NEXT:    vpmovsxbw %xmm3, %ymm3
; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm1
; AVX512F-NEXT:    vpmovsxbw %xmm1, %ymm1
; AVX512F-NEXT:    vpmullw %ymm3, %ymm1, %ymm1
; AVX512F-NEXT:    vpmovsxwd %ymm1, %zmm1
; AVX512F-NEXT:    vpmovdb %zmm1, %xmm1
; AVX512F-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX512F-NEXT:    retq
;
; AVX512BW-LABEL: mul_v64i8:
; AVX512BW:       # BB#0: # %entry
; AVX512BW-NEXT:    vpmovsxbw %ymm1, %zmm2
; AVX512BW-NEXT:    vpmovsxbw %ymm0, %zmm3
; AVX512BW-NEXT:    vpmullw %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT:    vpmovwb %zmm2, %ymm2
; AVX512BW-NEXT:    vextracti64x4 $1, %zmm1, %ymm1
; AVX512BW-NEXT:    vpmovsxbw %ymm1, %zmm1
; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
; AVX512BW-NEXT:    vpmovsxbw %ymm0, %zmm0
; AVX512BW-NEXT:    vpmullw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
; AVX512BW-NEXT:    retq
entry:
  %A = mul <64 x i8> %i, %j
  ret <64 x i8> %A
}