[llvm] 33b00b4 - [SLP][X86] Add basic funnel-shift / rotation test coverage

Simon Pilgrim via llvm-commits llvm-commits@lists.llvm.org
Thu Jul 27 08:19:54 PDT 2023


Author: Simon Pilgrim
Date: 2023-07-27T15:52:12+01:00
New Revision: 33b00b4949660cebaa7c0a8836bac7a2ab45e134

URL: https://github.com/llvm/llvm-project/commit/33b00b4949660cebaa7c0a8836bac7a2ab45e134
DIFF: https://github.com/llvm/llvm-project/commit/33b00b4949660cebaa7c0a8836bac7a2ab45e134.diff

LOG: [SLP][X86] Add basic funnel-shift / rotation test coverage

Including test coverage for Issue #63980
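
[For reference, not part of the commit itself: per the LLVM LangRef, llvm.fshl(a, b, c)
shifts the concatenation a:b left by c bits and returns the most-significant half, so when
both data operands are the same value it degenerates to a rotate-left (llvm.fshr likewise
to a rotate-right). That is the pattern the *-rot.ll tests below exercise, and what the SLP
vectorizer is expected to widen. A minimal sketch in LLVM IR, with %a, %b, %va, %vb as
illustrative placeholders rather than names from the diff:

  ; rotate-left: both data operands of fshl are the same value
  %r = call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b)
  ; after SLP vectorization, runs of such scalar calls become one vector intrinsic, e.g.
  %v = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %va, <8 x i32> %va, <8 x i32> %vb)
]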

Added: 
    llvm/test/Transforms/SLPVectorizer/X86/arith-fshl-rot.ll
    llvm/test/Transforms/SLPVectorizer/X86/arith-fshl.ll
    llvm/test/Transforms/SLPVectorizer/X86/arith-fshr-rot.ll
    llvm/test/Transforms/SLPVectorizer/X86/arith-fshr.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl-rot.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl-rot.ll
new file mode 100644
index 00000000000000..de74ea7f294834
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl-rot.ll
@@ -0,0 +1,853 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mtriple=x86_64-unknown -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=SSE
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=slm -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=SSE
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -mattr=+prefer-256-bit -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX,AVX256
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -mattr=-prefer-256-bit -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX512
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=knl -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX512
+
+@a64 = common global [8 x i64] zeroinitializer, align 64
+@b64 = common global [8 x i64] zeroinitializer, align 64
+@c64 = common global [8 x i64] zeroinitializer, align 64
+@d64 = common global [8 x i64] zeroinitializer, align 64
+@a32 = common global [16 x i32] zeroinitializer, align 64
+@b32 = common global [16 x i32] zeroinitializer, align 64
+@c32 = common global [16 x i32] zeroinitializer, align 64
+@d32 = common global [16 x i32] zeroinitializer, align 64
+@a16 = common global [32 x i16] zeroinitializer, align 64
+@b16 = common global [32 x i16] zeroinitializer, align 64
+@c16 = common global [32 x i16] zeroinitializer, align 64
+@d16 = common global [32 x i16] zeroinitializer, align 64
+@a8  = common global [64 x i8] zeroinitializer, align 64
+@b8  = common global [64 x i8] zeroinitializer, align 64
+@c8  = common global [64 x i8] zeroinitializer, align 64
+@d8  = common global [64 x i8] zeroinitializer, align 64
+
+declare i64 @llvm.fshl.i64(i64, i64, i64)
+declare i32 @llvm.fshl.i32(i32, i32, i32)
+declare i16 @llvm.fshl.i16(i16, i16, i16)
+declare i8  @llvm.fshl.i8 (i8 , i8 , i8 )
+
+define void @fshl_v8i64() {
+; SSE-LABEL: @fshl_v8i64(
+; SSE-NEXT:    [[A0:%.*]] = load i64, ptr @a64, align 8
+; SSE-NEXT:    [[A1:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 1), align 8
+; SSE-NEXT:    [[A2:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 2), align 8
+; SSE-NEXT:    [[A3:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 3), align 8
+; SSE-NEXT:    [[A4:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 4), align 8
+; SSE-NEXT:    [[A5:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 5), align 8
+; SSE-NEXT:    [[A6:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 6), align 8
+; SSE-NEXT:    [[A7:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 7), align 8
+; SSE-NEXT:    [[B0:%.*]] = load i64, ptr @b64, align 8
+; SSE-NEXT:    [[B1:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 1), align 8
+; SSE-NEXT:    [[B2:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 2), align 8
+; SSE-NEXT:    [[B3:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 3), align 8
+; SSE-NEXT:    [[B4:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 4), align 8
+; SSE-NEXT:    [[B5:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 5), align 8
+; SSE-NEXT:    [[B6:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 6), align 8
+; SSE-NEXT:    [[B7:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 7), align 8
+; SSE-NEXT:    [[R0:%.*]] = call i64 @llvm.fshl.i64(i64 [[A0]], i64 [[A0]], i64 [[B0]])
+; SSE-NEXT:    [[R1:%.*]] = call i64 @llvm.fshl.i64(i64 [[A1]], i64 [[A1]], i64 [[B1]])
+; SSE-NEXT:    [[R2:%.*]] = call i64 @llvm.fshl.i64(i64 [[A2]], i64 [[A2]], i64 [[B2]])
+; SSE-NEXT:    [[R3:%.*]] = call i64 @llvm.fshl.i64(i64 [[A3]], i64 [[A3]], i64 [[B3]])
+; SSE-NEXT:    [[R4:%.*]] = call i64 @llvm.fshl.i64(i64 [[A4]], i64 [[A4]], i64 [[B4]])
+; SSE-NEXT:    [[R5:%.*]] = call i64 @llvm.fshl.i64(i64 [[A5]], i64 [[A5]], i64 [[B5]])
+; SSE-NEXT:    [[R6:%.*]] = call i64 @llvm.fshl.i64(i64 [[A6]], i64 [[A6]], i64 [[B6]])
+; SSE-NEXT:    [[R7:%.*]] = call i64 @llvm.fshl.i64(i64 [[A7]], i64 [[A7]], i64 [[B7]])
+; SSE-NEXT:    store i64 [[R0]], ptr @d64, align 8
+; SSE-NEXT:    store i64 [[R1]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 1), align 8
+; SSE-NEXT:    store i64 [[R2]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 2), align 8
+; SSE-NEXT:    store i64 [[R3]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 3), align 8
+; SSE-NEXT:    store i64 [[R4]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 4), align 8
+; SSE-NEXT:    store i64 [[R5]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 5), align 8
+; SSE-NEXT:    store i64 [[R6]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 6), align 8
+; SSE-NEXT:    store i64 [[R7]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 7), align 8
+; SSE-NEXT:    ret void
+;
+; AVX1-LABEL: @fshl_v8i64(
+; AVX1-NEXT:    [[A0:%.*]] = load i64, ptr @a64, align 8
+; AVX1-NEXT:    [[A1:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 1), align 8
+; AVX1-NEXT:    [[A2:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 2), align 8
+; AVX1-NEXT:    [[A3:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 3), align 8
+; AVX1-NEXT:    [[A4:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 4), align 8
+; AVX1-NEXT:    [[A5:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 5), align 8
+; AVX1-NEXT:    [[A6:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 6), align 8
+; AVX1-NEXT:    [[A7:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 7), align 8
+; AVX1-NEXT:    [[B0:%.*]] = load i64, ptr @b64, align 8
+; AVX1-NEXT:    [[B1:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 1), align 8
+; AVX1-NEXT:    [[B2:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 2), align 8
+; AVX1-NEXT:    [[B3:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 3), align 8
+; AVX1-NEXT:    [[B4:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 4), align 8
+; AVX1-NEXT:    [[B5:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 5), align 8
+; AVX1-NEXT:    [[B6:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 6), align 8
+; AVX1-NEXT:    [[B7:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 7), align 8
+; AVX1-NEXT:    [[R0:%.*]] = call i64 @llvm.fshl.i64(i64 [[A0]], i64 [[A0]], i64 [[B0]])
+; AVX1-NEXT:    [[R1:%.*]] = call i64 @llvm.fshl.i64(i64 [[A1]], i64 [[A1]], i64 [[B1]])
+; AVX1-NEXT:    [[R2:%.*]] = call i64 @llvm.fshl.i64(i64 [[A2]], i64 [[A2]], i64 [[B2]])
+; AVX1-NEXT:    [[R3:%.*]] = call i64 @llvm.fshl.i64(i64 [[A3]], i64 [[A3]], i64 [[B3]])
+; AVX1-NEXT:    [[R4:%.*]] = call i64 @llvm.fshl.i64(i64 [[A4]], i64 [[A4]], i64 [[B4]])
+; AVX1-NEXT:    [[R5:%.*]] = call i64 @llvm.fshl.i64(i64 [[A5]], i64 [[A5]], i64 [[B5]])
+; AVX1-NEXT:    [[R6:%.*]] = call i64 @llvm.fshl.i64(i64 [[A6]], i64 [[A6]], i64 [[B6]])
+; AVX1-NEXT:    [[R7:%.*]] = call i64 @llvm.fshl.i64(i64 [[A7]], i64 [[A7]], i64 [[B7]])
+; AVX1-NEXT:    store i64 [[R0]], ptr @d64, align 8
+; AVX1-NEXT:    store i64 [[R1]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 1), align 8
+; AVX1-NEXT:    store i64 [[R2]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 2), align 8
+; AVX1-NEXT:    store i64 [[R3]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 3), align 8
+; AVX1-NEXT:    store i64 [[R4]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 4), align 8
+; AVX1-NEXT:    store i64 [[R5]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 5), align 8
+; AVX1-NEXT:    store i64 [[R6]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 6), align 8
+; AVX1-NEXT:    store i64 [[R7]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 7), align 8
+; AVX1-NEXT:    ret void
+;
+; AVX2-LABEL: @fshl_v8i64(
+; AVX2-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @a64, align 8
+; AVX2-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr @b64, align 8
+; AVX2-NEXT:    [[TMP3:%.*]] = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> [[TMP1]], <4 x i64> [[TMP1]], <4 x i64> [[TMP2]])
+; AVX2-NEXT:    store <4 x i64> [[TMP3]], ptr @d64, align 8
+; AVX2-NEXT:    [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 4), align 8
+; AVX2-NEXT:    [[TMP5:%.*]] = load <4 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 4), align 8
+; AVX2-NEXT:    [[TMP6:%.*]] = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> [[TMP4]], <4 x i64> [[TMP4]], <4 x i64> [[TMP5]])
+; AVX2-NEXT:    store <4 x i64> [[TMP6]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 4), align 8
+; AVX2-NEXT:    ret void
+;
+; AVX256-LABEL: @fshl_v8i64(
+; AVX256-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @a64, align 8
+; AVX256-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr @b64, align 8
+; AVX256-NEXT:    [[TMP3:%.*]] = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> [[TMP1]], <4 x i64> [[TMP1]], <4 x i64> [[TMP2]])
+; AVX256-NEXT:    store <4 x i64> [[TMP3]], ptr @d64, align 8
+; AVX256-NEXT:    [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 4), align 8
+; AVX256-NEXT:    [[TMP5:%.*]] = load <4 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 4), align 8
+; AVX256-NEXT:    [[TMP6:%.*]] = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> [[TMP4]], <4 x i64> [[TMP4]], <4 x i64> [[TMP5]])
+; AVX256-NEXT:    store <4 x i64> [[TMP6]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 4), align 8
+; AVX256-NEXT:    ret void
+;
+; AVX512-LABEL: @fshl_v8i64(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <8 x i64>, ptr @a64, align 8
+; AVX512-NEXT:    [[TMP2:%.*]] = load <8 x i64>, ptr @b64, align 8
+; AVX512-NEXT:    [[TMP3:%.*]] = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i64> [[TMP2]])
+; AVX512-NEXT:    store <8 x i64> [[TMP3]], ptr @d64, align 8
+; AVX512-NEXT:    ret void
+;
+  %a0 = load i64, ptr @a64, align 8
+  %a1 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 1), align 8
+  %a2 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 2), align 8
+  %a3 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 3), align 8
+  %a4 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 4), align 8
+  %a5 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 5), align 8
+  %a6 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 6), align 8
+  %a7 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 7), align 8
+  %b0 = load i64, ptr @b64, align 8
+  %b1 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 1), align 8
+  %b2 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 2), align 8
+  %b3 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 3), align 8
+  %b4 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 4), align 8
+  %b5 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 5), align 8
+  %b6 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 6), align 8
+  %b7 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 7), align 8
+  %r0 = call i64 @llvm.fshl.i64(i64 %a0, i64 %a0, i64 %b0)
+  %r1 = call i64 @llvm.fshl.i64(i64 %a1, i64 %a1, i64 %b1)
+  %r2 = call i64 @llvm.fshl.i64(i64 %a2, i64 %a2, i64 %b2)
+  %r3 = call i64 @llvm.fshl.i64(i64 %a3, i64 %a3, i64 %b3)
+  %r4 = call i64 @llvm.fshl.i64(i64 %a4, i64 %a4, i64 %b4)
+  %r5 = call i64 @llvm.fshl.i64(i64 %a5, i64 %a5, i64 %b5)
+  %r6 = call i64 @llvm.fshl.i64(i64 %a6, i64 %a6, i64 %b6)
+  %r7 = call i64 @llvm.fshl.i64(i64 %a7, i64 %a7, i64 %b7)
+  store i64 %r0, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 0), align 8
+  store i64 %r1, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 1), align 8
+  store i64 %r2, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 2), align 8
+  store i64 %r3, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 3), align 8
+  store i64 %r4, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 4), align 8
+  store i64 %r5, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 5), align 8
+  store i64 %r6, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 6), align 8
+  store i64 %r7, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 7), align 8
+  ret void
+}
+
+define void @fshl_v16i32() {
+; SSE-LABEL: @fshl_v16i32(
+; SSE-NEXT:    [[A0:%.*]] = load i32, ptr @a32, align 4
+; SSE-NEXT:    [[A1:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 1), align 4
+; SSE-NEXT:    [[A2:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 2), align 4
+; SSE-NEXT:    [[A3:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 3), align 4
+; SSE-NEXT:    [[A4:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 4), align 4
+; SSE-NEXT:    [[A5:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 5), align 4
+; SSE-NEXT:    [[A6:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 6), align 4
+; SSE-NEXT:    [[A7:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 7), align 4
+; SSE-NEXT:    [[A8:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 8), align 4
+; SSE-NEXT:    [[A9:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 9), align 4
+; SSE-NEXT:    [[A10:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 10), align 4
+; SSE-NEXT:    [[A11:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 11), align 4
+; SSE-NEXT:    [[A12:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 12), align 4
+; SSE-NEXT:    [[A13:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 13), align 4
+; SSE-NEXT:    [[A14:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 14), align 4
+; SSE-NEXT:    [[A15:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 15), align 4
+; SSE-NEXT:    [[B0:%.*]] = load i32, ptr @b32, align 4
+; SSE-NEXT:    [[B1:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 1), align 4
+; SSE-NEXT:    [[B2:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 2), align 4
+; SSE-NEXT:    [[B3:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 3), align 4
+; SSE-NEXT:    [[B4:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 4), align 4
+; SSE-NEXT:    [[B5:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 5), align 4
+; SSE-NEXT:    [[B6:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 6), align 4
+; SSE-NEXT:    [[B7:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 7), align 4
+; SSE-NEXT:    [[B8:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 8), align 4
+; SSE-NEXT:    [[B9:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 9), align 4
+; SSE-NEXT:    [[B10:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 10), align 4
+; SSE-NEXT:    [[B11:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 11), align 4
+; SSE-NEXT:    [[B12:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 12), align 4
+; SSE-NEXT:    [[B13:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 13), align 4
+; SSE-NEXT:    [[B14:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 14), align 4
+; SSE-NEXT:    [[B15:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 15), align 4
+; SSE-NEXT:    [[R0:%.*]] = call i32 @llvm.fshl.i32(i32 [[A0]], i32 [[A0]], i32 [[B0]])
+; SSE-NEXT:    [[R1:%.*]] = call i32 @llvm.fshl.i32(i32 [[A1]], i32 [[A1]], i32 [[B1]])
+; SSE-NEXT:    [[R2:%.*]] = call i32 @llvm.fshl.i32(i32 [[A2]], i32 [[A2]], i32 [[B2]])
+; SSE-NEXT:    [[R3:%.*]] = call i32 @llvm.fshl.i32(i32 [[A3]], i32 [[A3]], i32 [[B3]])
+; SSE-NEXT:    [[R4:%.*]] = call i32 @llvm.fshl.i32(i32 [[A4]], i32 [[A4]], i32 [[B4]])
+; SSE-NEXT:    [[R5:%.*]] = call i32 @llvm.fshl.i32(i32 [[A5]], i32 [[A5]], i32 [[B5]])
+; SSE-NEXT:    [[R6:%.*]] = call i32 @llvm.fshl.i32(i32 [[A6]], i32 [[A6]], i32 [[B6]])
+; SSE-NEXT:    [[R7:%.*]] = call i32 @llvm.fshl.i32(i32 [[A7]], i32 [[A7]], i32 [[B7]])
+; SSE-NEXT:    [[R8:%.*]] = call i32 @llvm.fshl.i32(i32 [[A8]], i32 [[A8]], i32 [[B8]])
+; SSE-NEXT:    [[R9:%.*]] = call i32 @llvm.fshl.i32(i32 [[A9]], i32 [[A9]], i32 [[B9]])
+; SSE-NEXT:    [[R10:%.*]] = call i32 @llvm.fshl.i32(i32 [[A10]], i32 [[A10]], i32 [[B10]])
+; SSE-NEXT:    [[R11:%.*]] = call i32 @llvm.fshl.i32(i32 [[A11]], i32 [[A11]], i32 [[B11]])
+; SSE-NEXT:    [[R12:%.*]] = call i32 @llvm.fshl.i32(i32 [[A12]], i32 [[A12]], i32 [[B12]])
+; SSE-NEXT:    [[R13:%.*]] = call i32 @llvm.fshl.i32(i32 [[A13]], i32 [[A13]], i32 [[B13]])
+; SSE-NEXT:    [[R14:%.*]] = call i32 @llvm.fshl.i32(i32 [[A14]], i32 [[A14]], i32 [[B14]])
+; SSE-NEXT:    [[R15:%.*]] = call i32 @llvm.fshl.i32(i32 [[A15]], i32 [[A15]], i32 [[B15]])
+; SSE-NEXT:    store i32 [[R0]], ptr @d32, align 4
+; SSE-NEXT:    store i32 [[R1]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 1), align 4
+; SSE-NEXT:    store i32 [[R2]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 2), align 4
+; SSE-NEXT:    store i32 [[R3]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 3), align 4
+; SSE-NEXT:    store i32 [[R4]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 4), align 4
+; SSE-NEXT:    store i32 [[R5]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 5), align 4
+; SSE-NEXT:    store i32 [[R6]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 6), align 4
+; SSE-NEXT:    store i32 [[R7]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 7), align 4
+; SSE-NEXT:    store i32 [[R8]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 8), align 4
+; SSE-NEXT:    store i32 [[R9]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 9), align 4
+; SSE-NEXT:    store i32 [[R10]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 10), align 4
+; SSE-NEXT:    store i32 [[R11]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 11), align 4
+; SSE-NEXT:    store i32 [[R12]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 12), align 4
+; SSE-NEXT:    store i32 [[R13]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 13), align 4
+; SSE-NEXT:    store i32 [[R14]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 14), align 4
+; SSE-NEXT:    store i32 [[R15]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 15), align 4
+; SSE-NEXT:    ret void
+;
+; AVX-LABEL: @fshl_v16i32(
+; AVX-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @a32, align 4
+; AVX-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr @b32, align 4
+; AVX-NEXT:    [[TMP3:%.*]] = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> [[TMP1]], <8 x i32> [[TMP1]], <8 x i32> [[TMP2]])
+; AVX-NEXT:    store <8 x i32> [[TMP3]], ptr @d32, align 4
+; AVX-NEXT:    [[TMP4:%.*]] = load <8 x i32>, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 8), align 4
+; AVX-NEXT:    [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 8), align 4
+; AVX-NEXT:    [[TMP6:%.*]] = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> [[TMP4]], <8 x i32> [[TMP4]], <8 x i32> [[TMP5]])
+; AVX-NEXT:    store <8 x i32> [[TMP6]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 8), align 4
+; AVX-NEXT:    ret void
+;
+; AVX512-LABEL: @fshl_v16i32(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <16 x i32>, ptr @a32, align 4
+; AVX512-NEXT:    [[TMP2:%.*]] = load <16 x i32>, ptr @b32, align 4
+; AVX512-NEXT:    [[TMP3:%.*]] = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> [[TMP2]])
+; AVX512-NEXT:    store <16 x i32> [[TMP3]], ptr @d32, align 4
+; AVX512-NEXT:    ret void
+;
+  %a0  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 0 ), align 4
+  %a1  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 1 ), align 4
+  %a2  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 2 ), align 4
+  %a3  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 3 ), align 4
+  %a4  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 4 ), align 4
+  %a5  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 5 ), align 4
+  %a6  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 6 ), align 4
+  %a7  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 7 ), align 4
+  %a8  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 8 ), align 4
+  %a9  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 9 ), align 4
+  %a10 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 10), align 4
+  %a11 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 11), align 4
+  %a12 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 12), align 4
+  %a13 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 13), align 4
+  %a14 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 14), align 4
+  %a15 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 15), align 4
+  %b0  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 0 ), align 4
+  %b1  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 1 ), align 4
+  %b2  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 2 ), align 4
+  %b3  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 3 ), align 4
+  %b4  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 4 ), align 4
+  %b5  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 5 ), align 4
+  %b6  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 6 ), align 4
+  %b7  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 7 ), align 4
+  %b8  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 8 ), align 4
+  %b9  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 9 ), align 4
+  %b10 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 10), align 4
+  %b11 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 11), align 4
+  %b12 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 12), align 4
+  %b13 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 13), align 4
+  %b14 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 14), align 4
+  %b15 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 15), align 4
+  %r0  = call i32 @llvm.fshl.i32(i32 %a0 , i32 %a0 , i32 %b0 )
+  %r1  = call i32 @llvm.fshl.i32(i32 %a1 , i32 %a1 , i32 %b1 )
+  %r2  = call i32 @llvm.fshl.i32(i32 %a2 , i32 %a2 , i32 %b2 )
+  %r3  = call i32 @llvm.fshl.i32(i32 %a3 , i32 %a3 , i32 %b3 )
+  %r4  = call i32 @llvm.fshl.i32(i32 %a4 , i32 %a4 , i32 %b4 )
+  %r5  = call i32 @llvm.fshl.i32(i32 %a5 , i32 %a5 , i32 %b5 )
+  %r6  = call i32 @llvm.fshl.i32(i32 %a6 , i32 %a6 , i32 %b6 )
+  %r7  = call i32 @llvm.fshl.i32(i32 %a7 , i32 %a7 , i32 %b7 )
+  %r8  = call i32 @llvm.fshl.i32(i32 %a8 , i32 %a8 , i32 %b8 )
+  %r9  = call i32 @llvm.fshl.i32(i32 %a9 , i32 %a9 , i32 %b9 )
+  %r10 = call i32 @llvm.fshl.i32(i32 %a10, i32 %a10, i32 %b10)
+  %r11 = call i32 @llvm.fshl.i32(i32 %a11, i32 %a11, i32 %b11)
+  %r12 = call i32 @llvm.fshl.i32(i32 %a12, i32 %a12, i32 %b12)
+  %r13 = call i32 @llvm.fshl.i32(i32 %a13, i32 %a13, i32 %b13)
+  %r14 = call i32 @llvm.fshl.i32(i32 %a14, i32 %a14, i32 %b14)
+  %r15 = call i32 @llvm.fshl.i32(i32 %a15, i32 %a15, i32 %b15)
+  store i32 %r0 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 0 ), align 4
+  store i32 %r1 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 1 ), align 4
+  store i32 %r2 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 2 ), align 4
+  store i32 %r3 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 3 ), align 4
+  store i32 %r4 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 4 ), align 4
+  store i32 %r5 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 5 ), align 4
+  store i32 %r6 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 6 ), align 4
+  store i32 %r7 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 7 ), align 4
+  store i32 %r8 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 8 ), align 4
+  store i32 %r9 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 9 ), align 4
+  store i32 %r10, ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 10), align 4
+  store i32 %r11, ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 11), align 4
+  store i32 %r12, ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 12), align 4
+  store i32 %r13, ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 13), align 4
+  store i32 %r14, ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 14), align 4
+  store i32 %r15, ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 15), align 4
+  ret void
+}
+
+define void @fshl_v32i16() {
+; AVX-LABEL: @fshl_v32i16(
+; AVX-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr @a16, align 2
+; AVX-NEXT:    [[TMP2:%.*]] = load <16 x i16>, ptr @b16, align 2
+; AVX-NEXT:    [[TMP3:%.*]] = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> [[TMP1]], <16 x i16> [[TMP1]], <16 x i16> [[TMP2]])
+; AVX-NEXT:    store <16 x i16> [[TMP3]], ptr @d16, align 2
+; AVX-NEXT:    [[TMP4:%.*]] = load <16 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 16), align 2
+; AVX-NEXT:    [[TMP5:%.*]] = load <16 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 16), align 2
+; AVX-NEXT:    [[TMP6:%.*]] = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> [[TMP4]], <16 x i16> [[TMP4]], <16 x i16> [[TMP5]])
+; AVX-NEXT:    store <16 x i16> [[TMP6]], ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 16), align 2
+; AVX-NEXT:    ret void
+;
+; AVX512-LABEL: @fshl_v32i16(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <32 x i16>, ptr @a16, align 2
+; AVX512-NEXT:    [[TMP2:%.*]] = load <32 x i16>, ptr @b16, align 2
+; AVX512-NEXT:    [[TMP3:%.*]] = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> [[TMP1]], <32 x i16> [[TMP1]], <32 x i16> [[TMP2]])
+; AVX512-NEXT:    store <32 x i16> [[TMP3]], ptr @d16, align 2
+; AVX512-NEXT:    ret void
+;
+  %a0  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 0 ), align 2
+  %a1  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 1 ), align 2
+  %a2  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 2 ), align 2
+  %a3  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 3 ), align 2
+  %a4  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 4 ), align 2
+  %a5  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 5 ), align 2
+  %a6  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 6 ), align 2
+  %a7  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 7 ), align 2
+  %a8  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 8 ), align 2
+  %a9  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 9 ), align 2
+  %a10 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 10), align 2
+  %a11 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 11), align 2
+  %a12 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 12), align 2
+  %a13 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 13), align 2
+  %a14 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 14), align 2
+  %a15 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 15), align 2
+  %a16 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 16), align 2
+  %a17 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 17), align 2
+  %a18 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 18), align 2
+  %a19 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 19), align 2
+  %a20 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 20), align 2
+  %a21 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 21), align 2
+  %a22 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 22), align 2
+  %a23 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 23), align 2
+  %a24 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 24), align 2
+  %a25 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 25), align 2
+  %a26 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 26), align 2
+  %a27 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 27), align 2
+  %a28 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 28), align 2
+  %a29 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 29), align 2
+  %a30 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 30), align 2
+  %a31 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 31), align 2
+  %b0  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 0 ), align 2
+  %b1  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 1 ), align 2
+  %b2  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 2 ), align 2
+  %b3  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 3 ), align 2
+  %b4  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 4 ), align 2
+  %b5  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 5 ), align 2
+  %b6  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 6 ), align 2
+  %b7  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 7 ), align 2
+  %b8  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 8 ), align 2
+  %b9  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 9 ), align 2
+  %b10 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 10), align 2
+  %b11 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 11), align 2
+  %b12 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 12), align 2
+  %b13 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 13), align 2
+  %b14 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 14), align 2
+  %b15 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 15), align 2
+  %b16 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 16), align 2
+  %b17 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 17), align 2
+  %b18 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 18), align 2
+  %b19 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 19), align 2
+  %b20 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 20), align 2
+  %b21 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 21), align 2
+  %b22 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 22), align 2
+  %b23 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 23), align 2
+  %b24 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 24), align 2
+  %b25 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 25), align 2
+  %b26 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 26), align 2
+  %b27 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 27), align 2
+  %b28 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 28), align 2
+  %b29 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 29), align 2
+  %b30 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 30), align 2
+  %b31 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 31), align 2
+  %r0  = call i16 @llvm.fshl.i16(i16 %a0 , i16 %a0 , i16 %b0 )
+  %r1  = call i16 @llvm.fshl.i16(i16 %a1 , i16 %a1 , i16 %b1 )
+  %r2  = call i16 @llvm.fshl.i16(i16 %a2 , i16 %a2 , i16 %b2 )
+  %r3  = call i16 @llvm.fshl.i16(i16 %a3 , i16 %a3 , i16 %b3 )
+  %r4  = call i16 @llvm.fshl.i16(i16 %a4 , i16 %a4 , i16 %b4 )
+  %r5  = call i16 @llvm.fshl.i16(i16 %a5 , i16 %a5 , i16 %b5 )
+  %r6  = call i16 @llvm.fshl.i16(i16 %a6 , i16 %a6 , i16 %b6 )
+  %r7  = call i16 @llvm.fshl.i16(i16 %a7 , i16 %a7 , i16 %b7 )
+  %r8  = call i16 @llvm.fshl.i16(i16 %a8 , i16 %a8 , i16 %b8 )
+  %r9  = call i16 @llvm.fshl.i16(i16 %a9 , i16 %a9 , i16 %b9 )
+  %r10 = call i16 @llvm.fshl.i16(i16 %a10, i16 %a10, i16 %b10)
+  %r11 = call i16 @llvm.fshl.i16(i16 %a11, i16 %a11, i16 %b11)
+  %r12 = call i16 @llvm.fshl.i16(i16 %a12, i16 %a12, i16 %b12)
+  %r13 = call i16 @llvm.fshl.i16(i16 %a13, i16 %a13, i16 %b13)
+  %r14 = call i16 @llvm.fshl.i16(i16 %a14, i16 %a14, i16 %b14)
+  %r15 = call i16 @llvm.fshl.i16(i16 %a15, i16 %a15, i16 %b15)
+  %r16 = call i16 @llvm.fshl.i16(i16 %a16, i16 %a16, i16 %b16)
+  %r17 = call i16 @llvm.fshl.i16(i16 %a17, i16 %a17, i16 %b17)
+  %r18 = call i16 @llvm.fshl.i16(i16 %a18, i16 %a18, i16 %b18)
+  %r19 = call i16 @llvm.fshl.i16(i16 %a19, i16 %a19, i16 %b19)
+  %r20 = call i16 @llvm.fshl.i16(i16 %a20, i16 %a20, i16 %b20)
+  %r21 = call i16 @llvm.fshl.i16(i16 %a21, i16 %a21, i16 %b21)
+  %r22 = call i16 @llvm.fshl.i16(i16 %a22, i16 %a22, i16 %b22)
+  %r23 = call i16 @llvm.fshl.i16(i16 %a23, i16 %a23, i16 %b23)
+  %r24 = call i16 @llvm.fshl.i16(i16 %a24, i16 %a24, i16 %b24)
+  %r25 = call i16 @llvm.fshl.i16(i16 %a25, i16 %a25, i16 %b25)
+  %r26 = call i16 @llvm.fshl.i16(i16 %a26, i16 %a26, i16 %b26)
+  %r27 = call i16 @llvm.fshl.i16(i16 %a27, i16 %a27, i16 %b27)
+  %r28 = call i16 @llvm.fshl.i16(i16 %a28, i16 %a28, i16 %b28)
+  %r29 = call i16 @llvm.fshl.i16(i16 %a29, i16 %a29, i16 %b29)
+  %r30 = call i16 @llvm.fshl.i16(i16 %a30, i16 %a30, i16 %b30)
+  %r31 = call i16 @llvm.fshl.i16(i16 %a31, i16 %a31, i16 %b31)
+  store i16 %r0 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 0 ), align 2
+  store i16 %r1 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 1 ), align 2
+  store i16 %r2 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 2 ), align 2
+  store i16 %r3 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 3 ), align 2
+  store i16 %r4 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 4 ), align 2
+  store i16 %r5 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 5 ), align 2
+  store i16 %r6 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 6 ), align 2
+  store i16 %r7 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 7 ), align 2
+  store i16 %r8 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 8 ), align 2
+  store i16 %r9 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 9 ), align 2
+  store i16 %r10, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 10), align 2
+  store i16 %r11, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 11), align 2
+  store i16 %r12, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 12), align 2
+  store i16 %r13, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 13), align 2
+  store i16 %r14, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 14), align 2
+  store i16 %r15, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 15), align 2
+  store i16 %r16, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 16), align 2
+  store i16 %r17, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 17), align 2
+  store i16 %r18, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 18), align 2
+  store i16 %r19, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 19), align 2
+  store i16 %r20, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 20), align 2
+  store i16 %r21, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 21), align 2
+  store i16 %r22, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 22), align 2
+  store i16 %r23, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 23), align 2
+  store i16 %r24, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 24), align 2
+  store i16 %r25, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 25), align 2
+  store i16 %r26, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 26), align 2
+  store i16 %r27, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 27), align 2
+  store i16 %r28, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 28), align 2
+  store i16 %r29, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 29), align 2
+  store i16 %r30, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 30), align 2
+  store i16 %r31, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 31), align 2
+  ret void
+}
+
+define void @fshl_v64i8() {
+; SSE-LABEL: @fshl_v64i8(
+; SSE-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr @a8, align 1
+; SSE-NEXT:    [[TMP2:%.*]] = load <16 x i8>, ptr @b8, align 1
+; SSE-NEXT:    [[TMP3:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP1]], <16 x i8> [[TMP2]])
+; SSE-NEXT:    store <16 x i8> [[TMP3]], ptr @d8, align 1
+; SSE-NEXT:    [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 16), align 1
+; SSE-NEXT:    [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 16), align 1
+; SSE-NEXT:    [[TMP6:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP4]], <16 x i8> [[TMP4]], <16 x i8> [[TMP5]])
+; SSE-NEXT:    store <16 x i8> [[TMP6]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 16), align 1
+; SSE-NEXT:    [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
+; SSE-NEXT:    [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
+; SSE-NEXT:    [[TMP9:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
+; SSE-NEXT:    [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
+; SSE-NEXT:    [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
+; SSE-NEXT:    [[TMP12:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
+; SSE-NEXT:    store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
+; SSE-NEXT:    store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 48), align 1
+; SSE-NEXT:    ret void
+;
+; AVX-LABEL: @fshl_v64i8(
+; AVX-NEXT:    [[TMP1:%.*]] = load <32 x i8>, ptr @a8, align 1
+; AVX-NEXT:    [[TMP2:%.*]] = load <32 x i8>, ptr @b8, align 1
+; AVX-NEXT:    [[TMP3:%.*]] = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> [[TMP1]], <32 x i8> [[TMP1]], <32 x i8> [[TMP2]])
+; AVX-NEXT:    store <32 x i8> [[TMP3]], ptr @d8, align 1
+; AVX-NEXT:    [[TMP4:%.*]] = load <32 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
+; AVX-NEXT:    [[TMP5:%.*]] = load <32 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
+; AVX-NEXT:    [[TMP6:%.*]] = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> [[TMP4]], <32 x i8> [[TMP4]], <32 x i8> [[TMP5]])
+; AVX-NEXT:    store <32 x i8> [[TMP6]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
+; AVX-NEXT:    ret void
+;
+; AVX512-LABEL: @fshl_v64i8(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <64 x i8>, ptr @a8, align 1
+; AVX512-NEXT:    [[TMP2:%.*]] = load <64 x i8>, ptr @b8, align 1
+; AVX512-NEXT:    [[TMP3:%.*]] = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> [[TMP1]], <64 x i8> [[TMP1]], <64 x i8> [[TMP2]])
+; AVX512-NEXT:    store <64 x i8> [[TMP3]], ptr @d8, align 1
+; AVX512-NEXT:    ret void
+;
+  %a0  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 0 ), align 1
+  %a1  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 1 ), align 1
+  %a2  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 2 ), align 1
+  %a3  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 3 ), align 1
+  %a4  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 4 ), align 1
+  %a5  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 5 ), align 1
+  %a6  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 6 ), align 1
+  %a7  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 7 ), align 1
+  %a8  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 8 ), align 1
+  %a9  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 9 ), align 1
+  %a10 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 10), align 1
+  %a11 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 11), align 1
+  %a12 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 12), align 1
+  %a13 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 13), align 1
+  %a14 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 14), align 1
+  %a15 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 15), align 1
+  %a16 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 16), align 1
+  %a17 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 17), align 1
+  %a18 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 18), align 1
+  %a19 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 19), align 1
+  %a20 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 20), align 1
+  %a21 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 21), align 1
+  %a22 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 22), align 1
+  %a23 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 23), align 1
+  %a24 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 24), align 1
+  %a25 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 25), align 1
+  %a26 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 26), align 1
+  %a27 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 27), align 1
+  %a28 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 28), align 1
+  %a29 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 29), align 1
+  %a30 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 30), align 1
+  %a31 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 31), align 1
+  %a32 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
+  %a33 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 33), align 1
+  %a34 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 34), align 1
+  %a35 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 35), align 1
+  %a36 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 36), align 1
+  %a37 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 37), align 1
+  %a38 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 38), align 1
+  %a39 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 39), align 1
+  %a40 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 40), align 1
+  %a41 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 41), align 1
+  %a42 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 42), align 1
+  %a43 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 43), align 1
+  %a44 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 44), align 1
+  %a45 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 45), align 1
+  %a46 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 46), align 1
+  %a47 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 47), align 1
+  %a48 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
+  %a49 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 49), align 1
+  %a50 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 50), align 1
+  %a51 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 51), align 1
+  %a52 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 52), align 1
+  %a53 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 53), align 1
+  %a54 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 54), align 1
+  %a55 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 55), align 1
+  %a56 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 56), align 1
+  %a57 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 57), align 1
+  %a58 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 58), align 1
+  %a59 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 59), align 1
+  %a60 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 60), align 1
+  %a61 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 61), align 1
+  %a62 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 62), align 1
+  %a63 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 63), align 1
+  %b0  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 0 ), align 1
+  %b1  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 1 ), align 1
+  %b2  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 2 ), align 1
+  %b3  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 3 ), align 1
+  %b4  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 4 ), align 1
+  %b5  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 5 ), align 1
+  %b6  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 6 ), align 1
+  %b7  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 7 ), align 1
+  %b8  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 8 ), align 1
+  %b9  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 9 ), align 1
+  %b10 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 10), align 1
+  %b11 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 11), align 1
+  %b12 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 12), align 1
+  %b13 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 13), align 1
+  %b14 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 14), align 1
+  %b15 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 15), align 1
+  %b16 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 16), align 1
+  %b17 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 17), align 1
+  %b18 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 18), align 1
+  %b19 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 19), align 1
+  %b20 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 20), align 1
+  %b21 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 21), align 1
+  %b22 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 22), align 1
+  %b23 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 23), align 1
+  %b24 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 24), align 1
+  %b25 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 25), align 1
+  %b26 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 26), align 1
+  %b27 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 27), align 1
+  %b28 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 28), align 1
+  %b29 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 29), align 1
+  %b30 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 30), align 1
+  %b31 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 31), align 1
+  %b32 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
+  %b33 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 33), align 1
+  %b34 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 34), align 1
+  %b35 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 35), align 1
+  %b36 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 36), align 1
+  %b37 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 37), align 1
+  %b38 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 38), align 1
+  %b39 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 39), align 1
+  %b40 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 40), align 1
+  %b41 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 41), align 1
+  %b42 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 42), align 1
+  %b43 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 43), align 1
+  %b44 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 44), align 1
+  %b45 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 45), align 1
+  %b46 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 46), align 1
+  %b47 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 47), align 1
+  %b48 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
+  %b49 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 49), align 1
+  %b50 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 50), align 1
+  %b51 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 51), align 1
+  %b52 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 52), align 1
+  %b53 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 53), align 1
+  %b54 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 54), align 1
+  %b55 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 55), align 1
+  %b56 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 56), align 1
+  %b57 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 57), align 1
+  %b58 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 58), align 1
+  %b59 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 59), align 1
+  %b60 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 60), align 1
+  %b61 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 61), align 1
+  %b62 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 62), align 1
+  %b63 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 63), align 1
+  %r0  = call i8 @llvm.fshl.i8(i8 %a0 , i8 %a0 , i8 %b0 )
+  %r1  = call i8 @llvm.fshl.i8(i8 %a1 , i8 %a1 , i8 %b1 )
+  %r2  = call i8 @llvm.fshl.i8(i8 %a2 , i8 %a2 , i8 %b2 )
+  %r3  = call i8 @llvm.fshl.i8(i8 %a3 , i8 %a3 , i8 %b3 )
+  %r4  = call i8 @llvm.fshl.i8(i8 %a4 , i8 %a4 , i8 %b4 )
+  %r5  = call i8 @llvm.fshl.i8(i8 %a5 , i8 %a5 , i8 %b5 )
+  %r6  = call i8 @llvm.fshl.i8(i8 %a6 , i8 %a6 , i8 %b6 )
+  %r7  = call i8 @llvm.fshl.i8(i8 %a7 , i8 %a7 , i8 %b7 )
+  %r8  = call i8 @llvm.fshl.i8(i8 %a8 , i8 %a8 , i8 %b8 )
+  %r9  = call i8 @llvm.fshl.i8(i8 %a9 , i8 %a9 , i8 %b9 )
+  %r10 = call i8 @llvm.fshl.i8(i8 %a10, i8 %a10, i8 %b10)
+  %r11 = call i8 @llvm.fshl.i8(i8 %a11, i8 %a11, i8 %b11)
+  %r12 = call i8 @llvm.fshl.i8(i8 %a12, i8 %a12, i8 %b12)
+  %r13 = call i8 @llvm.fshl.i8(i8 %a13, i8 %a13, i8 %b13)
+  %r14 = call i8 @llvm.fshl.i8(i8 %a14, i8 %a14, i8 %b14)
+  %r15 = call i8 @llvm.fshl.i8(i8 %a15, i8 %a15, i8 %b15)
+  %r16 = call i8 @llvm.fshl.i8(i8 %a16, i8 %a16, i8 %b16)
+  %r17 = call i8 @llvm.fshl.i8(i8 %a17, i8 %a17, i8 %b17)
+  %r18 = call i8 @llvm.fshl.i8(i8 %a18, i8 %a18, i8 %b18)
+  %r19 = call i8 @llvm.fshl.i8(i8 %a19, i8 %a19, i8 %b19)
+  %r20 = call i8 @llvm.fshl.i8(i8 %a20, i8 %a20, i8 %b20)
+  %r21 = call i8 @llvm.fshl.i8(i8 %a21, i8 %a21, i8 %b21)
+  %r22 = call i8 @llvm.fshl.i8(i8 %a22, i8 %a22, i8 %b22)
+  %r23 = call i8 @llvm.fshl.i8(i8 %a23, i8 %a23, i8 %b23)
+  %r24 = call i8 @llvm.fshl.i8(i8 %a24, i8 %a24, i8 %b24)
+  %r25 = call i8 @llvm.fshl.i8(i8 %a25, i8 %a25, i8 %b25)
+  %r26 = call i8 @llvm.fshl.i8(i8 %a26, i8 %a26, i8 %b26)
+  %r27 = call i8 @llvm.fshl.i8(i8 %a27, i8 %a27, i8 %b27)
+  %r28 = call i8 @llvm.fshl.i8(i8 %a28, i8 %a28, i8 %b28)
+  %r29 = call i8 @llvm.fshl.i8(i8 %a29, i8 %a29, i8 %b29)
+  %r30 = call i8 @llvm.fshl.i8(i8 %a30, i8 %a30, i8 %b30)
+  %r31 = call i8 @llvm.fshl.i8(i8 %a31, i8 %a31, i8 %b31)
+  %r32 = call i8 @llvm.fshl.i8(i8 %a32, i8 %a32, i8 %b32)
+  %r33 = call i8 @llvm.fshl.i8(i8 %a33, i8 %a33, i8 %b33)
+  %r34 = call i8 @llvm.fshl.i8(i8 %a34, i8 %a34, i8 %b34)
+  %r35 = call i8 @llvm.fshl.i8(i8 %a35, i8 %a35, i8 %b35)
+  %r36 = call i8 @llvm.fshl.i8(i8 %a36, i8 %a36, i8 %b36)
+  %r37 = call i8 @llvm.fshl.i8(i8 %a37, i8 %a37, i8 %b37)
+  %r38 = call i8 @llvm.fshl.i8(i8 %a38, i8 %a38, i8 %b38)
+  %r39 = call i8 @llvm.fshl.i8(i8 %a39, i8 %a39, i8 %b39)
+  %r40 = call i8 @llvm.fshl.i8(i8 %a40, i8 %a40, i8 %b40)
+  %r41 = call i8 @llvm.fshl.i8(i8 %a41, i8 %a41, i8 %b41)
+  %r42 = call i8 @llvm.fshl.i8(i8 %a42, i8 %a42, i8 %b42)
+  %r43 = call i8 @llvm.fshl.i8(i8 %a43, i8 %a43, i8 %b43)
+  %r44 = call i8 @llvm.fshl.i8(i8 %a44, i8 %a44, i8 %b44)
+  %r45 = call i8 @llvm.fshl.i8(i8 %a45, i8 %a45, i8 %b45)
+  %r46 = call i8 @llvm.fshl.i8(i8 %a46, i8 %a46, i8 %b46)
+  %r47 = call i8 @llvm.fshl.i8(i8 %a47, i8 %a47, i8 %b47)
+  %r48 = call i8 @llvm.fshl.i8(i8 %a48, i8 %a48, i8 %b48)
+  %r49 = call i8 @llvm.fshl.i8(i8 %a49, i8 %a49, i8 %b49)
+  %r50 = call i8 @llvm.fshl.i8(i8 %a50, i8 %a50, i8 %b50)
+  %r51 = call i8 @llvm.fshl.i8(i8 %a51, i8 %a51, i8 %b51)
+  %r52 = call i8 @llvm.fshl.i8(i8 %a52, i8 %a52, i8 %b52)
+  %r53 = call i8 @llvm.fshl.i8(i8 %a53, i8 %a53, i8 %b53)
+  %r54 = call i8 @llvm.fshl.i8(i8 %a54, i8 %a54, i8 %b54)
+  %r55 = call i8 @llvm.fshl.i8(i8 %a55, i8 %a55, i8 %b55)
+  %r56 = call i8 @llvm.fshl.i8(i8 %a56, i8 %a56, i8 %b56)
+  %r57 = call i8 @llvm.fshl.i8(i8 %a57, i8 %a57, i8 %b57)
+  %r58 = call i8 @llvm.fshl.i8(i8 %a58, i8 %a58, i8 %b58)
+  %r59 = call i8 @llvm.fshl.i8(i8 %a59, i8 %a59, i8 %b59)
+  %r60 = call i8 @llvm.fshl.i8(i8 %a60, i8 %a60, i8 %b60)
+  %r61 = call i8 @llvm.fshl.i8(i8 %a61, i8 %a61, i8 %b61)
+  %r62 = call i8 @llvm.fshl.i8(i8 %a62, i8 %a62, i8 %b62)
+  %r63 = call i8 @llvm.fshl.i8(i8 %a63, i8 %a63, i8 %b63)
+  store i8 %r0 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 0 ), align 1
+  store i8 %r1 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 1 ), align 1
+  store i8 %r2 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 2 ), align 1
+  store i8 %r3 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 3 ), align 1
+  store i8 %r4 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 4 ), align 1
+  store i8 %r5 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 5 ), align 1
+  store i8 %r6 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 6 ), align 1
+  store i8 %r7 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 7 ), align 1
+  store i8 %r8 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 8 ), align 1
+  store i8 %r9 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 9 ), align 1
+  store i8 %r10, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 10), align 1
+  store i8 %r11, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 11), align 1
+  store i8 %r12, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 12), align 1
+  store i8 %r13, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 13), align 1
+  store i8 %r14, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 14), align 1
+  store i8 %r15, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 15), align 1
+  store i8 %r16, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 16), align 1
+  store i8 %r17, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 17), align 1
+  store i8 %r18, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 18), align 1
+  store i8 %r19, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 19), align 1
+  store i8 %r20, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 20), align 1
+  store i8 %r21, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 21), align 1
+  store i8 %r22, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 22), align 1
+  store i8 %r23, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 23), align 1
+  store i8 %r24, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 24), align 1
+  store i8 %r25, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 25), align 1
+  store i8 %r26, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 26), align 1
+  store i8 %r27, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 27), align 1
+  store i8 %r28, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 28), align 1
+  store i8 %r29, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 29), align 1
+  store i8 %r30, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 30), align 1
+  store i8 %r31, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 31), align 1
+  store i8 %r32, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
+  store i8 %r33, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 33), align 1
+  store i8 %r34, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 34), align 1
+  store i8 %r35, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 35), align 1
+  store i8 %r36, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 36), align 1
+  store i8 %r37, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 37), align 1
+  store i8 %r38, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 38), align 1
+  store i8 %r39, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 39), align 1
+  store i8 %r40, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 40), align 1
+  store i8 %r41, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 41), align 1
+  store i8 %r42, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 42), align 1
+  store i8 %r43, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 43), align 1
+  store i8 %r44, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 44), align 1
+  store i8 %r45, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 45), align 1
+  store i8 %r46, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 46), align 1
+  store i8 %r47, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 47), align 1
+  store i8 %r48, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 48), align 1
+  store i8 %r49, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 49), align 1
+  store i8 %r50, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 50), align 1
+  store i8 %r51, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 51), align 1
+  store i8 %r52, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 52), align 1
+  store i8 %r53, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 53), align 1
+  store i8 %r54, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 54), align 1
+  store i8 %r55, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 55), align 1
+  store i8 %r56, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 56), align 1
+  store i8 %r57, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 57), align 1
+  store i8 %r58, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 58), align 1
+  store i8 %r59, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 59), align 1
+  store i8 %r60, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 60), align 1
+  store i8 %r61, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 61), align 1
+  store i8 %r62, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 62), align 1
+  store i8 %r63, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 63), align 1
+  ret void
+}
+
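+; Rotate of 2 x i32 by a variable amount: kept scalar on the SSE, AVX1 and
+; AVX2 runs, but vectorized to @llvm.fshl.v2i32 on the AVX256 and AVX512 runs.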
+define void @fshl_v2i32() {
+; SSE-LABEL: @fshl_v2i32(
+; SSE-NEXT:    [[A0:%.*]] = load i32, ptr @a32, align 4
+; SSE-NEXT:    [[A1:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 1), align 4
+; SSE-NEXT:    [[B0:%.*]] = load i32, ptr @b32, align 4
+; SSE-NEXT:    [[B1:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 1), align 4
+; SSE-NEXT:    [[R0:%.*]] = call i32 @llvm.fshl.i32(i32 [[A0]], i32 [[A0]], i32 [[B0]])
+; SSE-NEXT:    [[R1:%.*]] = call i32 @llvm.fshl.i32(i32 [[A1]], i32 [[A1]], i32 [[B1]])
+; SSE-NEXT:    store i32 [[R0]], ptr @d32, align 4
+; SSE-NEXT:    store i32 [[R1]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 1), align 4
+; SSE-NEXT:    ret void
+;
+; AVX1-LABEL: @fshl_v2i32(
+; AVX1-NEXT:    [[A0:%.*]] = load i32, ptr @a32, align 4
+; AVX1-NEXT:    [[A1:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 1), align 4
+; AVX1-NEXT:    [[B0:%.*]] = load i32, ptr @b32, align 4
+; AVX1-NEXT:    [[B1:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 1), align 4
+; AVX1-NEXT:    [[R0:%.*]] = call i32 @llvm.fshl.i32(i32 [[A0]], i32 [[A0]], i32 [[B0]])
+; AVX1-NEXT:    [[R1:%.*]] = call i32 @llvm.fshl.i32(i32 [[A1]], i32 [[A1]], i32 [[B1]])
+; AVX1-NEXT:    store i32 [[R0]], ptr @d32, align 4
+; AVX1-NEXT:    store i32 [[R1]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 1), align 4
+; AVX1-NEXT:    ret void
+;
+; AVX2-LABEL: @fshl_v2i32(
+; AVX2-NEXT:    [[A0:%.*]] = load i32, ptr @a32, align 4
+; AVX2-NEXT:    [[A1:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 1), align 4
+; AVX2-NEXT:    [[B0:%.*]] = load i32, ptr @b32, align 4
+; AVX2-NEXT:    [[B1:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 1), align 4
+; AVX2-NEXT:    [[R0:%.*]] = call i32 @llvm.fshl.i32(i32 [[A0]], i32 [[A0]], i32 [[B0]])
+; AVX2-NEXT:    [[R1:%.*]] = call i32 @llvm.fshl.i32(i32 [[A1]], i32 [[A1]], i32 [[B1]])
+; AVX2-NEXT:    store i32 [[R0]], ptr @d32, align 4
+; AVX2-NEXT:    store i32 [[R1]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 1), align 4
+; AVX2-NEXT:    ret void
+;
+; AVX256-LABEL: @fshl_v2i32(
+; AVX256-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr @a32, align 4
+; AVX256-NEXT:    [[TMP2:%.*]] = load <2 x i32>, ptr @b32, align 4
+; AVX256-NEXT:    [[TMP3:%.*]] = call <2 x i32> @llvm.fshl.v2i32(<2 x i32> [[TMP1]], <2 x i32> [[TMP1]], <2 x i32> [[TMP2]])
+; AVX256-NEXT:    store <2 x i32> [[TMP3]], ptr @d32, align 4
+; AVX256-NEXT:    ret void
+;
+; AVX512-LABEL: @fshl_v2i32(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr @a32, align 4
+; AVX512-NEXT:    [[TMP2:%.*]] = load <2 x i32>, ptr @b32, align 4
+; AVX512-NEXT:    [[TMP3:%.*]] = call <2 x i32> @llvm.fshl.v2i32(<2 x i32> [[TMP1]], <2 x i32> [[TMP1]], <2 x i32> [[TMP2]])
+; AVX512-NEXT:    store <2 x i32> [[TMP3]], ptr @d32, align 4
+; AVX512-NEXT:    ret void
+;
+  %a0  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 0 ), align 4
+  %a1  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 1 ), align 4
+  %b0  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 0 ), align 4
+  %b1  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 1 ), align 4
+  %r0  = call i32 @llvm.fshl.i32(i32 %a0 , i32 %a0 , i32 %b0 )
+  %r1  = call i32 @llvm.fshl.i32(i32 %a1 , i32 %a1 , i32 %b1 )
+  store i32 %r0 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 0 ), align 4
+  store i32 %r1 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 1 ), align 4
+  ret void
+}
+
+; PR63980
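+; Rotate of 2 x i32 by a uniform constant amount: vectorized to
+; @llvm.fshl.v2i32 on every run line (SSE, AVX and AVX512 check prefixes).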
+define void @fshl_v2i32_uniformconst() {
+; SSE-LABEL: @fshl_v2i32_uniformconst(
+; SSE-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr @a32, align 4
+; SSE-NEXT:    [[TMP2:%.*]] = call <2 x i32> @llvm.fshl.v2i32(<2 x i32> [[TMP1]], <2 x i32> [[TMP1]], <2 x i32> <i32 1, i32 1>)
+; SSE-NEXT:    store <2 x i32> [[TMP2]], ptr @d32, align 4
+; SSE-NEXT:    ret void
+;
+; AVX-LABEL: @fshl_v2i32_uniformconst(
+; AVX-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr @a32, align 4
+; AVX-NEXT:    [[TMP2:%.*]] = call <2 x i32> @llvm.fshl.v2i32(<2 x i32> [[TMP1]], <2 x i32> [[TMP1]], <2 x i32> <i32 1, i32 1>)
+; AVX-NEXT:    store <2 x i32> [[TMP2]], ptr @d32, align 4
+; AVX-NEXT:    ret void
+;
+; AVX512-LABEL: @fshl_v2i32_uniformconst(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr @a32, align 4
+; AVX512-NEXT:    [[TMP2:%.*]] = call <2 x i32> @llvm.fshl.v2i32(<2 x i32> [[TMP1]], <2 x i32> [[TMP1]], <2 x i32> <i32 1, i32 1>)
+; AVX512-NEXT:    store <2 x i32> [[TMP2]], ptr @d32, align 4
+; AVX512-NEXT:    ret void
+;
+  %a0  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 0 ), align 4
+  %a1  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 1 ), align 4
+  %r0  = call i32 @llvm.fshl.i32(i32 %a0 , i32 %a0 , i32 1 )
+  %r1  = call i32 @llvm.fshl.i32(i32 %a1 , i32 %a1 , i32 1 )
+  store i32 %r0 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 0 ), align 4
+  store i32 %r1 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 1 ), align 4
+  ret void
+}

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl.ll
new file mode 100644
index 00000000000000..5153dc34e7a4ff
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshl.ll
@@ -0,0 +1,938 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mtriple=x86_64-unknown -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=SSE
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=slm -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=SSE
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -mattr=+prefer-256-bit -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX,AVX256
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -mattr=-prefer-256-bit -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX512
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=knl -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX512
+
+@a64 = common global [8 x i64] zeroinitializer, align 64
+@b64 = common global [8 x i64] zeroinitializer, align 64
+@c64 = common global [8 x i64] zeroinitializer, align 64
+@d64 = common global [8 x i64] zeroinitializer, align 64
+@a32 = common global [16 x i32] zeroinitializer, align 64
+@b32 = common global [16 x i32] zeroinitializer, align 64
+@c32 = common global [16 x i32] zeroinitializer, align 64
+@d32 = common global [16 x i32] zeroinitializer, align 64
+@a16 = common global [32 x i16] zeroinitializer, align 64
+@b16 = common global [32 x i16] zeroinitializer, align 64
+@c16 = common global [32 x i16] zeroinitializer, align 64
+@d16 = common global [32 x i16] zeroinitializer, align 64
+@a8  = common global [64 x i8] zeroinitializer, align 64
+@b8  = common global [64 x i8] zeroinitializer, align 64
+@c8  = common global [64 x i8] zeroinitializer, align 64
+@d8  = common global [64 x i8] zeroinitializer, align 64
+
+declare i64 @llvm.fshl.i64(i64, i64, i64)
+declare i32 @llvm.fshl.i32(i32, i32, i32)
+declare i16 @llvm.fshl.i16(i16, i16, i16)
+declare i8  @llvm.fshl.i8 (i8 , i8 , i8 )
+
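+; Funnel shift of 8 x i64: scalar on the SSE runs, <2 x i64> chunks on AVX1,
+; <4 x i64> halves on AVX2 and AVX256, and a single <8 x i64> op on AVX512.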
+define void @fshl_v8i64() {
+; SSE-LABEL: @fshl_v8i64(
+; SSE-NEXT:    [[A0:%.*]] = load i64, ptr @a64, align 8
+; SSE-NEXT:    [[A1:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 1), align 8
+; SSE-NEXT:    [[A2:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 2), align 8
+; SSE-NEXT:    [[A3:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 3), align 8
+; SSE-NEXT:    [[A4:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 4), align 8
+; SSE-NEXT:    [[A5:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 5), align 8
+; SSE-NEXT:    [[A6:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 6), align 8
+; SSE-NEXT:    [[A7:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 7), align 8
+; SSE-NEXT:    [[B0:%.*]] = load i64, ptr @b64, align 8
+; SSE-NEXT:    [[B1:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 1), align 8
+; SSE-NEXT:    [[B2:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 2), align 8
+; SSE-NEXT:    [[B3:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 3), align 8
+; SSE-NEXT:    [[B4:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 4), align 8
+; SSE-NEXT:    [[B5:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 5), align 8
+; SSE-NEXT:    [[B6:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 6), align 8
+; SSE-NEXT:    [[B7:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 7), align 8
+; SSE-NEXT:    [[C0:%.*]] = load i64, ptr @c64, align 8
+; SSE-NEXT:    [[C1:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 1), align 8
+; SSE-NEXT:    [[C2:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 2), align 8
+; SSE-NEXT:    [[C3:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 3), align 8
+; SSE-NEXT:    [[C4:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 4), align 8
+; SSE-NEXT:    [[C5:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 5), align 8
+; SSE-NEXT:    [[C6:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 6), align 8
+; SSE-NEXT:    [[C7:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 7), align 8
+; SSE-NEXT:    [[R0:%.*]] = call i64 @llvm.fshl.i64(i64 [[A0]], i64 [[B0]], i64 [[C0]])
+; SSE-NEXT:    [[R1:%.*]] = call i64 @llvm.fshl.i64(i64 [[A1]], i64 [[B1]], i64 [[C1]])
+; SSE-NEXT:    [[R2:%.*]] = call i64 @llvm.fshl.i64(i64 [[A2]], i64 [[B2]], i64 [[C2]])
+; SSE-NEXT:    [[R3:%.*]] = call i64 @llvm.fshl.i64(i64 [[A3]], i64 [[B3]], i64 [[C3]])
+; SSE-NEXT:    [[R4:%.*]] = call i64 @llvm.fshl.i64(i64 [[A4]], i64 [[B4]], i64 [[C4]])
+; SSE-NEXT:    [[R5:%.*]] = call i64 @llvm.fshl.i64(i64 [[A5]], i64 [[B5]], i64 [[C5]])
+; SSE-NEXT:    [[R6:%.*]] = call i64 @llvm.fshl.i64(i64 [[A6]], i64 [[B6]], i64 [[C6]])
+; SSE-NEXT:    [[R7:%.*]] = call i64 @llvm.fshl.i64(i64 [[A7]], i64 [[B7]], i64 [[C7]])
+; SSE-NEXT:    store i64 [[R0]], ptr @d64, align 8
+; SSE-NEXT:    store i64 [[R1]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 1), align 8
+; SSE-NEXT:    store i64 [[R2]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 2), align 8
+; SSE-NEXT:    store i64 [[R3]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 3), align 8
+; SSE-NEXT:    store i64 [[R4]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 4), align 8
+; SSE-NEXT:    store i64 [[R5]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 5), align 8
+; SSE-NEXT:    store i64 [[R6]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 6), align 8
+; SSE-NEXT:    store i64 [[R7]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 7), align 8
+; SSE-NEXT:    ret void
+;
+; AVX1-LABEL: @fshl_v8i64(
+; AVX1-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @a64, align 8
+; AVX1-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr @b64, align 8
+; AVX1-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr @c64, align 8
+; AVX1-NEXT:    [[TMP4:%.*]] = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> [[TMP1]], <2 x i64> [[TMP2]], <2 x i64> [[TMP3]])
+; AVX1-NEXT:    store <2 x i64> [[TMP4]], ptr @d64, align 8
+; AVX1-NEXT:    [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 2), align 8
+; AVX1-NEXT:    [[TMP6:%.*]] = load <2 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 2), align 8
+; AVX1-NEXT:    [[TMP7:%.*]] = load <2 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 2), align 8
+; AVX1-NEXT:    [[TMP8:%.*]] = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> [[TMP5]], <2 x i64> [[TMP6]], <2 x i64> [[TMP7]])
+; AVX1-NEXT:    store <2 x i64> [[TMP8]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 2), align 8
+; AVX1-NEXT:    [[TMP9:%.*]] = load <2 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 4), align 8
+; AVX1-NEXT:    [[TMP10:%.*]] = load <2 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 4), align 8
+; AVX1-NEXT:    [[TMP11:%.*]] = load <2 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 4), align 8
+; AVX1-NEXT:    [[TMP12:%.*]] = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> [[TMP9]], <2 x i64> [[TMP10]], <2 x i64> [[TMP11]])
+; AVX1-NEXT:    store <2 x i64> [[TMP12]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 4), align 8
+; AVX1-NEXT:    [[TMP13:%.*]] = load <2 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 6), align 8
+; AVX1-NEXT:    [[TMP14:%.*]] = load <2 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 6), align 8
+; AVX1-NEXT:    [[TMP15:%.*]] = load <2 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 6), align 8
+; AVX1-NEXT:    [[TMP16:%.*]] = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> [[TMP13]], <2 x i64> [[TMP14]], <2 x i64> [[TMP15]])
+; AVX1-NEXT:    store <2 x i64> [[TMP16]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 6), align 8
+; AVX1-NEXT:    ret void
+;
+; AVX2-LABEL: @fshl_v8i64(
+; AVX2-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @a64, align 8
+; AVX2-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr @b64, align 8
+; AVX2-NEXT:    [[TMP3:%.*]] = load <4 x i64>, ptr @c64, align 8
+; AVX2-NEXT:    [[TMP4:%.*]] = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> [[TMP1]], <4 x i64> [[TMP2]], <4 x i64> [[TMP3]])
+; AVX2-NEXT:    store <4 x i64> [[TMP4]], ptr @d64, align 8
+; AVX2-NEXT:    [[TMP5:%.*]] = load <4 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 4), align 8
+; AVX2-NEXT:    [[TMP6:%.*]] = load <4 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 4), align 8
+; AVX2-NEXT:    [[TMP7:%.*]] = load <4 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 4), align 8
+; AVX2-NEXT:    [[TMP8:%.*]] = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> [[TMP5]], <4 x i64> [[TMP6]], <4 x i64> [[TMP7]])
+; AVX2-NEXT:    store <4 x i64> [[TMP8]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 4), align 8
+; AVX2-NEXT:    ret void
+;
+; AVX256-LABEL: @fshl_v8i64(
+; AVX256-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @a64, align 8
+; AVX256-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr @b64, align 8
+; AVX256-NEXT:    [[TMP3:%.*]] = load <4 x i64>, ptr @c64, align 8
+; AVX256-NEXT:    [[TMP4:%.*]] = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> [[TMP1]], <4 x i64> [[TMP2]], <4 x i64> [[TMP3]])
+; AVX256-NEXT:    store <4 x i64> [[TMP4]], ptr @d64, align 8
+; AVX256-NEXT:    [[TMP5:%.*]] = load <4 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 4), align 8
+; AVX256-NEXT:    [[TMP6:%.*]] = load <4 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 4), align 8
+; AVX256-NEXT:    [[TMP7:%.*]] = load <4 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 4), align 8
+; AVX256-NEXT:    [[TMP8:%.*]] = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> [[TMP5]], <4 x i64> [[TMP6]], <4 x i64> [[TMP7]])
+; AVX256-NEXT:    store <4 x i64> [[TMP8]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 4), align 8
+; AVX256-NEXT:    ret void
+;
+; AVX512-LABEL: @fshl_v8i64(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <8 x i64>, ptr @a64, align 8
+; AVX512-NEXT:    [[TMP2:%.*]] = load <8 x i64>, ptr @b64, align 8
+; AVX512-NEXT:    [[TMP3:%.*]] = load <8 x i64>, ptr @c64, align 8
+; AVX512-NEXT:    [[TMP4:%.*]] = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i64> [[TMP3]])
+; AVX512-NEXT:    store <8 x i64> [[TMP4]], ptr @d64, align 8
+; AVX512-NEXT:    ret void
+;
+  %a0 = load i64, ptr @a64, align 8
+  %a1 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 1), align 8
+  %a2 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 2), align 8
+  %a3 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 3), align 8
+  %a4 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 4), align 8
+  %a5 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 5), align 8
+  %a6 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 6), align 8
+  %a7 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 7), align 8
+  %b0 = load i64, ptr @b64, align 8
+  %b1 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 1), align 8
+  %b2 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 2), align 8
+  %b3 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 3), align 8
+  %b4 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 4), align 8
+  %b5 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 5), align 8
+  %b6 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 6), align 8
+  %b7 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 7), align 8
+  %c0 = load i64, ptr @c64, align 8
+  %c1 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 1), align 8
+  %c2 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 2), align 8
+  %c3 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 3), align 8
+  %c4 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 4), align 8
+  %c5 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 5), align 8
+  %c6 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 6), align 8
+  %c7 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 7), align 8
+  %r0 = call i64 @llvm.fshl.i64(i64 %a0, i64 %b0, i64 %c0)
+  %r1 = call i64 @llvm.fshl.i64(i64 %a1, i64 %b1, i64 %c1)
+  %r2 = call i64 @llvm.fshl.i64(i64 %a2, i64 %b2, i64 %c2)
+  %r3 = call i64 @llvm.fshl.i64(i64 %a3, i64 %b3, i64 %c3)
+  %r4 = call i64 @llvm.fshl.i64(i64 %a4, i64 %b4, i64 %c4)
+  %r5 = call i64 @llvm.fshl.i64(i64 %a5, i64 %b5, i64 %c5)
+  %r6 = call i64 @llvm.fshl.i64(i64 %a6, i64 %b6, i64 %c6)
+  %r7 = call i64 @llvm.fshl.i64(i64 %a7, i64 %b7, i64 %c7)
+  store i64 %r0, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 0), align 8
+  store i64 %r1, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 1), align 8
+  store i64 %r2, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 2), align 8
+  store i64 %r3, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 3), align 8
+  store i64 %r4, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 4), align 8
+  store i64 %r5, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 5), align 8
+  store i64 %r6, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 6), align 8
+  store i64 %r7, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 7), align 8
+  ret void
+}
+
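+; Funnel shift of 16 x i32: scalar on the SSE runs, two <8 x i32> halves on
+; the AVX runs, and a single <16 x i32> op on AVX512.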
+define void @fshl_v16i32() {
+; SSE-LABEL: @fshl_v16i32(
+; SSE-NEXT:    [[A0:%.*]] = load i32, ptr @a32, align 4
+; SSE-NEXT:    [[A1:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 1), align 4
+; SSE-NEXT:    [[A2:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 2), align 4
+; SSE-NEXT:    [[A3:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 3), align 4
+; SSE-NEXT:    [[A4:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 4), align 4
+; SSE-NEXT:    [[A5:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 5), align 4
+; SSE-NEXT:    [[A6:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 6), align 4
+; SSE-NEXT:    [[A7:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 7), align 4
+; SSE-NEXT:    [[A8:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 8), align 4
+; SSE-NEXT:    [[A9:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 9), align 4
+; SSE-NEXT:    [[A10:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 10), align 4
+; SSE-NEXT:    [[A11:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 11), align 4
+; SSE-NEXT:    [[A12:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 12), align 4
+; SSE-NEXT:    [[A13:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 13), align 4
+; SSE-NEXT:    [[A14:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 14), align 4
+; SSE-NEXT:    [[A15:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 15), align 4
+; SSE-NEXT:    [[B0:%.*]] = load i32, ptr @b32, align 4
+; SSE-NEXT:    [[B1:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 1), align 4
+; SSE-NEXT:    [[B2:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 2), align 4
+; SSE-NEXT:    [[B3:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 3), align 4
+; SSE-NEXT:    [[B4:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 4), align 4
+; SSE-NEXT:    [[B5:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 5), align 4
+; SSE-NEXT:    [[B6:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 6), align 4
+; SSE-NEXT:    [[B7:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 7), align 4
+; SSE-NEXT:    [[B8:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 8), align 4
+; SSE-NEXT:    [[B9:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 9), align 4
+; SSE-NEXT:    [[B10:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 10), align 4
+; SSE-NEXT:    [[B11:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 11), align 4
+; SSE-NEXT:    [[B12:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 12), align 4
+; SSE-NEXT:    [[B13:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 13), align 4
+; SSE-NEXT:    [[B14:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 14), align 4
+; SSE-NEXT:    [[B15:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 15), align 4
+; SSE-NEXT:    [[C0:%.*]] = load i32, ptr @c32, align 4
+; SSE-NEXT:    [[C1:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 1), align 4
+; SSE-NEXT:    [[C2:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 2), align 4
+; SSE-NEXT:    [[C3:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 3), align 4
+; SSE-NEXT:    [[C4:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 4), align 4
+; SSE-NEXT:    [[C5:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 5), align 4
+; SSE-NEXT:    [[C6:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 6), align 4
+; SSE-NEXT:    [[C7:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 7), align 4
+; SSE-NEXT:    [[C8:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 8), align 4
+; SSE-NEXT:    [[C9:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 9), align 4
+; SSE-NEXT:    [[C10:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 10), align 4
+; SSE-NEXT:    [[C11:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 11), align 4
+; SSE-NEXT:    [[C12:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 12), align 4
+; SSE-NEXT:    [[C13:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 13), align 4
+; SSE-NEXT:    [[C14:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 14), align 4
+; SSE-NEXT:    [[C15:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 15), align 4
+; SSE-NEXT:    [[R0:%.*]] = call i32 @llvm.fshl.i32(i32 [[A0]], i32 [[B0]], i32 [[C0]])
+; SSE-NEXT:    [[R1:%.*]] = call i32 @llvm.fshl.i32(i32 [[A1]], i32 [[B1]], i32 [[C1]])
+; SSE-NEXT:    [[R2:%.*]] = call i32 @llvm.fshl.i32(i32 [[A2]], i32 [[B2]], i32 [[C2]])
+; SSE-NEXT:    [[R3:%.*]] = call i32 @llvm.fshl.i32(i32 [[A3]], i32 [[B3]], i32 [[C3]])
+; SSE-NEXT:    [[R4:%.*]] = call i32 @llvm.fshl.i32(i32 [[A4]], i32 [[B4]], i32 [[C4]])
+; SSE-NEXT:    [[R5:%.*]] = call i32 @llvm.fshl.i32(i32 [[A5]], i32 [[B5]], i32 [[C5]])
+; SSE-NEXT:    [[R6:%.*]] = call i32 @llvm.fshl.i32(i32 [[A6]], i32 [[B6]], i32 [[C6]])
+; SSE-NEXT:    [[R7:%.*]] = call i32 @llvm.fshl.i32(i32 [[A7]], i32 [[B7]], i32 [[C7]])
+; SSE-NEXT:    [[R8:%.*]] = call i32 @llvm.fshl.i32(i32 [[A8]], i32 [[B8]], i32 [[C8]])
+; SSE-NEXT:    [[R9:%.*]] = call i32 @llvm.fshl.i32(i32 [[A9]], i32 [[B9]], i32 [[C9]])
+; SSE-NEXT:    [[R10:%.*]] = call i32 @llvm.fshl.i32(i32 [[A10]], i32 [[B10]], i32 [[C10]])
+; SSE-NEXT:    [[R11:%.*]] = call i32 @llvm.fshl.i32(i32 [[A11]], i32 [[B11]], i32 [[C11]])
+; SSE-NEXT:    [[R12:%.*]] = call i32 @llvm.fshl.i32(i32 [[A12]], i32 [[B12]], i32 [[C12]])
+; SSE-NEXT:    [[R13:%.*]] = call i32 @llvm.fshl.i32(i32 [[A13]], i32 [[B13]], i32 [[C13]])
+; SSE-NEXT:    [[R14:%.*]] = call i32 @llvm.fshl.i32(i32 [[A14]], i32 [[B14]], i32 [[C14]])
+; SSE-NEXT:    [[R15:%.*]] = call i32 @llvm.fshl.i32(i32 [[A15]], i32 [[B15]], i32 [[C15]])
+; SSE-NEXT:    store i32 [[R0]], ptr @d32, align 4
+; SSE-NEXT:    store i32 [[R1]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 1), align 4
+; SSE-NEXT:    store i32 [[R2]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 2), align 4
+; SSE-NEXT:    store i32 [[R3]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 3), align 4
+; SSE-NEXT:    store i32 [[R4]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 4), align 4
+; SSE-NEXT:    store i32 [[R5]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 5), align 4
+; SSE-NEXT:    store i32 [[R6]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 6), align 4
+; SSE-NEXT:    store i32 [[R7]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 7), align 4
+; SSE-NEXT:    store i32 [[R8]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 8), align 4
+; SSE-NEXT:    store i32 [[R9]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 9), align 4
+; SSE-NEXT:    store i32 [[R10]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 10), align 4
+; SSE-NEXT:    store i32 [[R11]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 11), align 4
+; SSE-NEXT:    store i32 [[R12]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 12), align 4
+; SSE-NEXT:    store i32 [[R13]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 13), align 4
+; SSE-NEXT:    store i32 [[R14]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 14), align 4
+; SSE-NEXT:    store i32 [[R15]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 15), align 4
+; SSE-NEXT:    ret void
+;
+; AVX-LABEL: @fshl_v16i32(
+; AVX-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @a32, align 4
+; AVX-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr @b32, align 4
+; AVX-NEXT:    [[TMP3:%.*]] = load <8 x i32>, ptr @c32, align 4
+; AVX-NEXT:    [[TMP4:%.*]] = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> [[TMP3]])
+; AVX-NEXT:    store <8 x i32> [[TMP4]], ptr @d32, align 4
+; AVX-NEXT:    [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 8), align 4
+; AVX-NEXT:    [[TMP6:%.*]] = load <8 x i32>, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 8), align 4
+; AVX-NEXT:    [[TMP7:%.*]] = load <8 x i32>, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 8), align 4
+; AVX-NEXT:    [[TMP8:%.*]] = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> [[TMP5]], <8 x i32> [[TMP6]], <8 x i32> [[TMP7]])
+; AVX-NEXT:    store <8 x i32> [[TMP8]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 8), align 4
+; AVX-NEXT:    ret void
+;
+; AVX512-LABEL: @fshl_v16i32(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <16 x i32>, ptr @a32, align 4
+; AVX512-NEXT:    [[TMP2:%.*]] = load <16 x i32>, ptr @b32, align 4
+; AVX512-NEXT:    [[TMP3:%.*]] = load <16 x i32>, ptr @c32, align 4
+; AVX512-NEXT:    [[TMP4:%.*]] = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> [[TMP3]])
+; AVX512-NEXT:    store <16 x i32> [[TMP4]], ptr @d32, align 4
+; AVX512-NEXT:    ret void
+;
+  %a0  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 0 ), align 4
+  %a1  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 1 ), align 4
+  %a2  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 2 ), align 4
+  %a3  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 3 ), align 4
+  %a4  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 4 ), align 4
+  %a5  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 5 ), align 4
+  %a6  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 6 ), align 4
+  %a7  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 7 ), align 4
+  %a8  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 8 ), align 4
+  %a9  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 9 ), align 4
+  %a10 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 10), align 4
+  %a11 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 11), align 4
+  %a12 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 12), align 4
+  %a13 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 13), align 4
+  %a14 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 14), align 4
+  %a15 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 15), align 4
+  %b0  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 0 ), align 4
+  %b1  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 1 ), align 4
+  %b2  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 2 ), align 4
+  %b3  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 3 ), align 4
+  %b4  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 4 ), align 4
+  %b5  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 5 ), align 4
+  %b6  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 6 ), align 4
+  %b7  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 7 ), align 4
+  %b8  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 8 ), align 4
+  %b9  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 9 ), align 4
+  %b10 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 10), align 4
+  %b11 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 11), align 4
+  %b12 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 12), align 4
+  %b13 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 13), align 4
+  %b14 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 14), align 4
+  %b15 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 15), align 4
+  %c0  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 0 ), align 4
+  %c1  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 1 ), align 4
+  %c2  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 2 ), align 4
+  %c3  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 3 ), align 4
+  %c4  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 4 ), align 4
+  %c5  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 5 ), align 4
+  %c6  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 6 ), align 4
+  %c7  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 7 ), align 4
+  %c8  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 8 ), align 4
+  %c9  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 9 ), align 4
+  %c10 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 10), align 4
+  %c11 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 11), align 4
+  %c12 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 12), align 4
+  %c13 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 13), align 4
+  %c14 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 14), align 4
+  %c15 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 15), align 4
+  %r0  = call i32 @llvm.fshl.i32(i32 %a0 , i32 %b0 , i32 %c0 )
+  %r1  = call i32 @llvm.fshl.i32(i32 %a1 , i32 %b1 , i32 %c1 )
+  %r2  = call i32 @llvm.fshl.i32(i32 %a2 , i32 %b2 , i32 %c2 )
+  %r3  = call i32 @llvm.fshl.i32(i32 %a3 , i32 %b3 , i32 %c3 )
+  %r4  = call i32 @llvm.fshl.i32(i32 %a4 , i32 %b4 , i32 %c4 )
+  %r5  = call i32 @llvm.fshl.i32(i32 %a5 , i32 %b5 , i32 %c5 )
+  %r6  = call i32 @llvm.fshl.i32(i32 %a6 , i32 %b6 , i32 %c6 )
+  %r7  = call i32 @llvm.fshl.i32(i32 %a7 , i32 %b7 , i32 %c7 )
+  %r8  = call i32 @llvm.fshl.i32(i32 %a8 , i32 %b8 , i32 %c8 )
+  %r9  = call i32 @llvm.fshl.i32(i32 %a9 , i32 %b9 , i32 %c9 )
+  %r10 = call i32 @llvm.fshl.i32(i32 %a10, i32 %b10, i32 %c10)
+  %r11 = call i32 @llvm.fshl.i32(i32 %a11, i32 %b11, i32 %c11)
+  %r12 = call i32 @llvm.fshl.i32(i32 %a12, i32 %b12, i32 %c12)
+  %r13 = call i32 @llvm.fshl.i32(i32 %a13, i32 %b13, i32 %c13)
+  %r14 = call i32 @llvm.fshl.i32(i32 %a14, i32 %b14, i32 %c14)
+  %r15 = call i32 @llvm.fshl.i32(i32 %a15, i32 %b15, i32 %c15)
+  store i32 %r0 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 0 ), align 4
+  store i32 %r1 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 1 ), align 4
+  store i32 %r2 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 2 ), align 4
+  store i32 %r3 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 3 ), align 4
+  store i32 %r4 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 4 ), align 4
+  store i32 %r5 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 5 ), align 4
+  store i32 %r6 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 6 ), align 4
+  store i32 %r7 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 7 ), align 4
+  store i32 %r8 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 8 ), align 4
+  store i32 %r9 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 9 ), align 4
+  store i32 %r10, ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 10), align 4
+  store i32 %r11, ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 11), align 4
+  store i32 %r12, ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 12), align 4
+  store i32 %r13, ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 13), align 4
+  store i32 %r14, ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 14), align 4
+  store i32 %r15, ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 15), align 4
+  ret void
+}
+
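+; Funnel shift of 32 x i16: <8 x i16> chunks on the SSE runs, <16 x i16>
+; halves on the AVX runs, and a single <32 x i16> op on AVX512.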
+define void @fshl_v32i16() {
+; SSE-LABEL: @fshl_v32i16(
+; SSE-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @a16, align 2
+; SSE-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr @b16, align 2
+; SSE-NEXT:    [[TMP3:%.*]] = load <8 x i16>, ptr @c16, align 2
+; SSE-NEXT:    [[TMP4:%.*]] = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> [[TMP1]], <8 x i16> [[TMP2]], <8 x i16> [[TMP3]])
+; SSE-NEXT:    store <8 x i16> [[TMP4]], ptr @d16, align 2
+; SSE-NEXT:    [[TMP5:%.*]] = load <8 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 8), align 2
+; SSE-NEXT:    [[TMP6:%.*]] = load <8 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 8), align 2
+; SSE-NEXT:    [[TMP7:%.*]] = load <8 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 8), align 2
+; SSE-NEXT:    [[TMP8:%.*]] = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> [[TMP5]], <8 x i16> [[TMP6]], <8 x i16> [[TMP7]])
+; SSE-NEXT:    store <8 x i16> [[TMP8]], ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 8), align 2
+; SSE-NEXT:    [[TMP9:%.*]] = load <8 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 16), align 2
+; SSE-NEXT:    [[TMP10:%.*]] = load <8 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 16), align 2
+; SSE-NEXT:    [[TMP11:%.*]] = load <8 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 16), align 2
+; SSE-NEXT:    [[TMP12:%.*]] = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> [[TMP9]], <8 x i16> [[TMP10]], <8 x i16> [[TMP11]])
+; SSE-NEXT:    store <8 x i16> [[TMP12]], ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 16), align 2
+; SSE-NEXT:    [[TMP13:%.*]] = load <8 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 24), align 2
+; SSE-NEXT:    [[TMP14:%.*]] = load <8 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 24), align 2
+; SSE-NEXT:    [[TMP15:%.*]] = load <8 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 24), align 2
+; SSE-NEXT:    [[TMP16:%.*]] = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> [[TMP13]], <8 x i16> [[TMP14]], <8 x i16> [[TMP15]])
+; SSE-NEXT:    store <8 x i16> [[TMP16]], ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 24), align 2
+; SSE-NEXT:    ret void
+;
+; AVX-LABEL: @fshl_v32i16(
+; AVX-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr @a16, align 2
+; AVX-NEXT:    [[TMP2:%.*]] = load <16 x i16>, ptr @b16, align 2
+; AVX-NEXT:    [[TMP3:%.*]] = load <16 x i16>, ptr @c16, align 2
+; AVX-NEXT:    [[TMP4:%.*]] = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i16> [[TMP3]])
+; AVX-NEXT:    store <16 x i16> [[TMP4]], ptr @d16, align 2
+; AVX-NEXT:    [[TMP5:%.*]] = load <16 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 16), align 2
+; AVX-NEXT:    [[TMP6:%.*]] = load <16 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 16), align 2
+; AVX-NEXT:    [[TMP7:%.*]] = load <16 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 16), align 2
+; AVX-NEXT:    [[TMP8:%.*]] = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> [[TMP5]], <16 x i16> [[TMP6]], <16 x i16> [[TMP7]])
+; AVX-NEXT:    store <16 x i16> [[TMP8]], ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 16), align 2
+; AVX-NEXT:    ret void
+;
+; AVX512-LABEL: @fshl_v32i16(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <32 x i16>, ptr @a16, align 2
+; AVX512-NEXT:    [[TMP2:%.*]] = load <32 x i16>, ptr @b16, align 2
+; AVX512-NEXT:    [[TMP3:%.*]] = load <32 x i16>, ptr @c16, align 2
+; AVX512-NEXT:    [[TMP4:%.*]] = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> [[TMP1]], <32 x i16> [[TMP2]], <32 x i16> [[TMP3]])
+; AVX512-NEXT:    store <32 x i16> [[TMP4]], ptr @d16, align 2
+; AVX512-NEXT:    ret void
+;
+  %a0  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 0 ), align 2
+  %a1  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 1 ), align 2
+  %a2  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 2 ), align 2
+  %a3  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 3 ), align 2
+  %a4  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 4 ), align 2
+  %a5  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 5 ), align 2
+  %a6  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 6 ), align 2
+  %a7  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 7 ), align 2
+  %a8  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 8 ), align 2
+  %a9  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 9 ), align 2
+  %a10 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 10), align 2
+  %a11 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 11), align 2
+  %a12 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 12), align 2
+  %a13 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 13), align 2
+  %a14 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 14), align 2
+  %a15 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 15), align 2
+  %a16 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 16), align 2
+  %a17 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 17), align 2
+  %a18 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 18), align 2
+  %a19 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 19), align 2
+  %a20 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 20), align 2
+  %a21 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 21), align 2
+  %a22 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 22), align 2
+  %a23 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 23), align 2
+  %a24 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 24), align 2
+  %a25 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 25), align 2
+  %a26 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 26), align 2
+  %a27 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 27), align 2
+  %a28 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 28), align 2
+  %a29 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 29), align 2
+  %a30 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 30), align 2
+  %a31 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 31), align 2
+  %b0  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 0 ), align 2
+  %b1  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 1 ), align 2
+  %b2  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 2 ), align 2
+  %b3  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 3 ), align 2
+  %b4  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 4 ), align 2
+  %b5  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 5 ), align 2
+  %b6  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 6 ), align 2
+  %b7  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 7 ), align 2
+  %b8  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 8 ), align 2
+  %b9  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 9 ), align 2
+  %b10 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 10), align 2
+  %b11 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 11), align 2
+  %b12 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 12), align 2
+  %b13 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 13), align 2
+  %b14 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 14), align 2
+  %b15 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 15), align 2
+  %b16 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 16), align 2
+  %b17 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 17), align 2
+  %b18 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 18), align 2
+  %b19 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 19), align 2
+  %b20 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 20), align 2
+  %b21 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 21), align 2
+  %b22 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 22), align 2
+  %b23 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 23), align 2
+  %b24 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 24), align 2
+  %b25 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 25), align 2
+  %b26 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 26), align 2
+  %b27 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 27), align 2
+  %b28 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 28), align 2
+  %b29 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 29), align 2
+  %b30 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 30), align 2
+  %b31 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 31), align 2
+  %c0  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 0 ), align 2
+  %c1  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 1 ), align 2
+  %c2  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 2 ), align 2
+  %c3  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 3 ), align 2
+  %c4  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 4 ), align 2
+  %c5  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 5 ), align 2
+  %c6  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 6 ), align 2
+  %c7  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 7 ), align 2
+  %c8  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 8 ), align 2
+  %c9  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 9 ), align 2
+  %c10 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 10), align 2
+  %c11 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 11), align 2
+  %c12 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 12), align 2
+  %c13 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 13), align 2
+  %c14 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 14), align 2
+  %c15 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 15), align 2
+  %c16 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 16), align 2
+  %c17 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 17), align 2
+  %c18 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 18), align 2
+  %c19 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 19), align 2
+  %c20 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 20), align 2
+  %c21 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 21), align 2
+  %c22 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 22), align 2
+  %c23 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 23), align 2
+  %c24 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 24), align 2
+  %c25 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 25), align 2
+  %c26 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 26), align 2
+  %c27 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 27), align 2
+  %c28 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 28), align 2
+  %c29 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 29), align 2
+  %c30 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 30), align 2
+  %c31 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 31), align 2
+  %r0  = call i16 @llvm.fshl.i16(i16 %a0 , i16 %b0 , i16 %c0 )
+  %r1  = call i16 @llvm.fshl.i16(i16 %a1 , i16 %b1 , i16 %c1 )
+  %r2  = call i16 @llvm.fshl.i16(i16 %a2 , i16 %b2 , i16 %c2 )
+  %r3  = call i16 @llvm.fshl.i16(i16 %a3 , i16 %b3 , i16 %c3 )
+  %r4  = call i16 @llvm.fshl.i16(i16 %a4 , i16 %b4 , i16 %c4 )
+  %r5  = call i16 @llvm.fshl.i16(i16 %a5 , i16 %b5 , i16 %c5 )
+  %r6  = call i16 @llvm.fshl.i16(i16 %a6 , i16 %b6 , i16 %c6 )
+  %r7  = call i16 @llvm.fshl.i16(i16 %a7 , i16 %b7 , i16 %c7 )
+  %r8  = call i16 @llvm.fshl.i16(i16 %a8 , i16 %b8 , i16 %c8 )
+  %r9  = call i16 @llvm.fshl.i16(i16 %a9 , i16 %b9 , i16 %c9 )
+  %r10 = call i16 @llvm.fshl.i16(i16 %a10, i16 %b10, i16 %c10)
+  %r11 = call i16 @llvm.fshl.i16(i16 %a11, i16 %b11, i16 %c11)
+  %r12 = call i16 @llvm.fshl.i16(i16 %a12, i16 %b12, i16 %c12)
+  %r13 = call i16 @llvm.fshl.i16(i16 %a13, i16 %b13, i16 %c13)
+  %r14 = call i16 @llvm.fshl.i16(i16 %a14, i16 %b14, i16 %c14)
+  %r15 = call i16 @llvm.fshl.i16(i16 %a15, i16 %b15, i16 %c15)
+  %r16 = call i16 @llvm.fshl.i16(i16 %a16, i16 %b16, i16 %c16)
+  %r17 = call i16 @llvm.fshl.i16(i16 %a17, i16 %b17, i16 %c17)
+  %r18 = call i16 @llvm.fshl.i16(i16 %a18, i16 %b18, i16 %c18)
+  %r19 = call i16 @llvm.fshl.i16(i16 %a19, i16 %b19, i16 %c19)
+  %r20 = call i16 @llvm.fshl.i16(i16 %a20, i16 %b20, i16 %c20)
+  %r21 = call i16 @llvm.fshl.i16(i16 %a21, i16 %b21, i16 %c21)
+  %r22 = call i16 @llvm.fshl.i16(i16 %a22, i16 %b22, i16 %c22)
+  %r23 = call i16 @llvm.fshl.i16(i16 %a23, i16 %b23, i16 %c23)
+  %r24 = call i16 @llvm.fshl.i16(i16 %a24, i16 %b24, i16 %c24)
+  %r25 = call i16 @llvm.fshl.i16(i16 %a25, i16 %b25, i16 %c25)
+  %r26 = call i16 @llvm.fshl.i16(i16 %a26, i16 %b26, i16 %c26)
+  %r27 = call i16 @llvm.fshl.i16(i16 %a27, i16 %b27, i16 %c27)
+  %r28 = call i16 @llvm.fshl.i16(i16 %a28, i16 %b28, i16 %c28)
+  %r29 = call i16 @llvm.fshl.i16(i16 %a29, i16 %b29, i16 %c29)
+  %r30 = call i16 @llvm.fshl.i16(i16 %a30, i16 %b30, i16 %c30)
+  %r31 = call i16 @llvm.fshl.i16(i16 %a31, i16 %b31, i16 %c31)
+  store i16 %r0 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 0 ), align 2
+  store i16 %r1 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 1 ), align 2
+  store i16 %r2 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 2 ), align 2
+  store i16 %r3 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 3 ), align 2
+  store i16 %r4 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 4 ), align 2
+  store i16 %r5 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 5 ), align 2
+  store i16 %r6 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 6 ), align 2
+  store i16 %r7 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 7 ), align 2
+  store i16 %r8 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 8 ), align 2
+  store i16 %r9 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 9 ), align 2
+  store i16 %r10, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 10), align 2
+  store i16 %r11, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 11), align 2
+  store i16 %r12, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 12), align 2
+  store i16 %r13, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 13), align 2
+  store i16 %r14, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 14), align 2
+  store i16 %r15, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 15), align 2
+  store i16 %r16, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 16), align 2
+  store i16 %r17, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 17), align 2
+  store i16 %r18, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 18), align 2
+  store i16 %r19, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 19), align 2
+  store i16 %r20, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 20), align 2
+  store i16 %r21, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 21), align 2
+  store i16 %r22, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 22), align 2
+  store i16 %r23, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 23), align 2
+  store i16 %r24, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 24), align 2
+  store i16 %r25, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 25), align 2
+  store i16 %r26, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 26), align 2
+  store i16 %r27, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 27), align 2
+  store i16 %r28, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 28), align 2
+  store i16 %r29, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 29), align 2
+  store i16 %r30, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 30), align 2
+  store i16 %r31, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 31), align 2
+  ret void
+}
+
+define void @fshl_v64i8() {
+; SSE-LABEL: @fshl_v64i8(
+; SSE-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr @a8, align 1
+; SSE-NEXT:    [[TMP2:%.*]] = load <16 x i8>, ptr @b8, align 1
+; SSE-NEXT:    [[TMP3:%.*]] = load <16 x i8>, ptr @c8, align 1
+; SSE-NEXT:    [[TMP4:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]])
+; SSE-NEXT:    [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 16), align 1
+; SSE-NEXT:    [[TMP6:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 16), align 1
+; SSE-NEXT:    [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 16), align 1
+; SSE-NEXT:    [[TMP8:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP5]], <16 x i8> [[TMP6]], <16 x i8> [[TMP7]])
+; SSE-NEXT:    [[TMP9:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
+; SSE-NEXT:    [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
+; SSE-NEXT:    [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
+; SSE-NEXT:    [[TMP12:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
+; SSE-NEXT:    [[TMP13:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
+; SSE-NEXT:    [[TMP14:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
+; SSE-NEXT:    [[TMP15:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP9]], <16 x i8> [[TMP10]], <16 x i8> [[TMP13]])
+; SSE-NEXT:    [[TMP16:%.*]] = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> [[TMP11]], <16 x i8> [[TMP12]], <16 x i8> [[TMP14]])
+; SSE-NEXT:    store <16 x i8> [[TMP4]], ptr @d8, align 1
+; SSE-NEXT:    store <16 x i8> [[TMP8]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 16), align 1
+; SSE-NEXT:    store <16 x i8> [[TMP15]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
+; SSE-NEXT:    store <16 x i8> [[TMP16]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 48), align 1
+; SSE-NEXT:    ret void
+;
+; AVX-LABEL: @fshl_v64i8(
+; AVX-NEXT:    [[TMP1:%.*]] = load <32 x i8>, ptr @a8, align 1
+; AVX-NEXT:    [[TMP2:%.*]] = load <32 x i8>, ptr @b8, align 1
+; AVX-NEXT:    [[TMP3:%.*]] = load <32 x i8>, ptr @c8, align 1
+; AVX-NEXT:    [[TMP4:%.*]] = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> [[TMP1]], <32 x i8> [[TMP2]], <32 x i8> [[TMP3]])
+; AVX-NEXT:    [[TMP5:%.*]] = load <32 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
+; AVX-NEXT:    [[TMP6:%.*]] = load <32 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
+; AVX-NEXT:    [[TMP7:%.*]] = load <32 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
+; AVX-NEXT:    [[TMP8:%.*]] = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> [[TMP5]], <32 x i8> [[TMP6]], <32 x i8> [[TMP7]])
+; AVX-NEXT:    store <32 x i8> [[TMP4]], ptr @d8, align 1
+; AVX-NEXT:    store <32 x i8> [[TMP8]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
+; AVX-NEXT:    ret void
+;
+; AVX512-LABEL: @fshl_v64i8(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <64 x i8>, ptr @a8, align 1
+; AVX512-NEXT:    [[TMP2:%.*]] = load <64 x i8>, ptr @b8, align 1
+; AVX512-NEXT:    [[TMP3:%.*]] = load <64 x i8>, ptr @c8, align 1
+; AVX512-NEXT:    [[TMP4:%.*]] = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> [[TMP1]], <64 x i8> [[TMP2]], <64 x i8> [[TMP3]])
+; AVX512-NEXT:    store <64 x i8> [[TMP4]], ptr @d8, align 1
+; AVX512-NEXT:    ret void
+;
+  %a0  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 0 ), align 1
+  %a1  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 1 ), align 1
+  %a2  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 2 ), align 1
+  %a3  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 3 ), align 1
+  %a4  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 4 ), align 1
+  %a5  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 5 ), align 1
+  %a6  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 6 ), align 1
+  %a7  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 7 ), align 1
+  %a8  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 8 ), align 1
+  %a9  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 9 ), align 1
+  %a10 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 10), align 1
+  %a11 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 11), align 1
+  %a12 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 12), align 1
+  %a13 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 13), align 1
+  %a14 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 14), align 1
+  %a15 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 15), align 1
+  %a16 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 16), align 1
+  %a17 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 17), align 1
+  %a18 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 18), align 1
+  %a19 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 19), align 1
+  %a20 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 20), align 1
+  %a21 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 21), align 1
+  %a22 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 22), align 1
+  %a23 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 23), align 1
+  %a24 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 24), align 1
+  %a25 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 25), align 1
+  %a26 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 26), align 1
+  %a27 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 27), align 1
+  %a28 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 28), align 1
+  %a29 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 29), align 1
+  %a30 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 30), align 1
+  %a31 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 31), align 1
+  %a32 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
+  %a33 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 33), align 1
+  %a34 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 34), align 1
+  %a35 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 35), align 1
+  %a36 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 36), align 1
+  %a37 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 37), align 1
+  %a38 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 38), align 1
+  %a39 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 39), align 1
+  %a40 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 40), align 1
+  %a41 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 41), align 1
+  %a42 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 42), align 1
+  %a43 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 43), align 1
+  %a44 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 44), align 1
+  %a45 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 45), align 1
+  %a46 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 46), align 1
+  %a47 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 47), align 1
+  %a48 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
+  %a49 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 49), align 1
+  %a50 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 50), align 1
+  %a51 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 51), align 1
+  %a52 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 52), align 1
+  %a53 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 53), align 1
+  %a54 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 54), align 1
+  %a55 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 55), align 1
+  %a56 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 56), align 1
+  %a57 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 57), align 1
+  %a58 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 58), align 1
+  %a59 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 59), align 1
+  %a60 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 60), align 1
+  %a61 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 61), align 1
+  %a62 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 62), align 1
+  %a63 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 63), align 1
+  %b0  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 0 ), align 1
+  %b1  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 1 ), align 1
+  %b2  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 2 ), align 1
+  %b3  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 3 ), align 1
+  %b4  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 4 ), align 1
+  %b5  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 5 ), align 1
+  %b6  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 6 ), align 1
+  %b7  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 7 ), align 1
+  %b8  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 8 ), align 1
+  %b9  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 9 ), align 1
+  %b10 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 10), align 1
+  %b11 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 11), align 1
+  %b12 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 12), align 1
+  %b13 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 13), align 1
+  %b14 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 14), align 1
+  %b15 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 15), align 1
+  %b16 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 16), align 1
+  %b17 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 17), align 1
+  %b18 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 18), align 1
+  %b19 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 19), align 1
+  %b20 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 20), align 1
+  %b21 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 21), align 1
+  %b22 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 22), align 1
+  %b23 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 23), align 1
+  %b24 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 24), align 1
+  %b25 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 25), align 1
+  %b26 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 26), align 1
+  %b27 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 27), align 1
+  %b28 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 28), align 1
+  %b29 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 29), align 1
+  %b30 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 30), align 1
+  %b31 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 31), align 1
+  %b32 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
+  %b33 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 33), align 1
+  %b34 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 34), align 1
+  %b35 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 35), align 1
+  %b36 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 36), align 1
+  %b37 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 37), align 1
+  %b38 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 38), align 1
+  %b39 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 39), align 1
+  %b40 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 40), align 1
+  %b41 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 41), align 1
+  %b42 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 42), align 1
+  %b43 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 43), align 1
+  %b44 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 44), align 1
+  %b45 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 45), align 1
+  %b46 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 46), align 1
+  %b47 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 47), align 1
+  %b48 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
+  %b49 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 49), align 1
+  %b50 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 50), align 1
+  %b51 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 51), align 1
+  %b52 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 52), align 1
+  %b53 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 53), align 1
+  %b54 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 54), align 1
+  %b55 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 55), align 1
+  %b56 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 56), align 1
+  %b57 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 57), align 1
+  %b58 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 58), align 1
+  %b59 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 59), align 1
+  %b60 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 60), align 1
+  %b61 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 61), align 1
+  %b62 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 62), align 1
+  %b63 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 63), align 1
+  %c0  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 0 ), align 1
+  %c1  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 1 ), align 1
+  %c2  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 2 ), align 1
+  %c3  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 3 ), align 1
+  %c4  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 4 ), align 1
+  %c5  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 5 ), align 1
+  %c6  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 6 ), align 1
+  %c7  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 7 ), align 1
+  %c8  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 8 ), align 1
+  %c9  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 9 ), align 1
+  %c10 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 10), align 1
+  %c11 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 11), align 1
+  %c12 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 12), align 1
+  %c13 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 13), align 1
+  %c14 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 14), align 1
+  %c15 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 15), align 1
+  %c16 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 16), align 1
+  %c17 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 17), align 1
+  %c18 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 18), align 1
+  %c19 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 19), align 1
+  %c20 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 20), align 1
+  %c21 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 21), align 1
+  %c22 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 22), align 1
+  %c23 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 23), align 1
+  %c24 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 24), align 1
+  %c25 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 25), align 1
+  %c26 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 26), align 1
+  %c27 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 27), align 1
+  %c28 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 28), align 1
+  %c29 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 29), align 1
+  %c30 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 30), align 1
+  %c31 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 31), align 1
+  %c32 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
+  %c33 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 33), align 1
+  %c34 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 34), align 1
+  %c35 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 35), align 1
+  %c36 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 36), align 1
+  %c37 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 37), align 1
+  %c38 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 38), align 1
+  %c39 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 39), align 1
+  %c40 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 40), align 1
+  %c41 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 41), align 1
+  %c42 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 42), align 1
+  %c43 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 43), align 1
+  %c44 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 44), align 1
+  %c45 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 45), align 1
+  %c46 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 46), align 1
+  %c47 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 47), align 1
+  %c48 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
+  %c49 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 49), align 1
+  %c50 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 50), align 1
+  %c51 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 51), align 1
+  %c52 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 52), align 1
+  %c53 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 53), align 1
+  %c54 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 54), align 1
+  %c55 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 55), align 1
+  %c56 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 56), align 1
+  %c57 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 57), align 1
+  %c58 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 58), align 1
+  %c59 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 59), align 1
+  %c60 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 60), align 1
+  %c61 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 61), align 1
+  %c62 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 62), align 1
+  %c63 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 63), align 1
+  %r0  = call i8 @llvm.fshl.i8(i8 %a0 , i8 %b0 , i8 %c0 )
+  %r1  = call i8 @llvm.fshl.i8(i8 %a1 , i8 %b1 , i8 %c1 )
+  %r2  = call i8 @llvm.fshl.i8(i8 %a2 , i8 %b2 , i8 %c2 )
+  %r3  = call i8 @llvm.fshl.i8(i8 %a3 , i8 %b3 , i8 %c3 )
+  %r4  = call i8 @llvm.fshl.i8(i8 %a4 , i8 %b4 , i8 %c4 )
+  %r5  = call i8 @llvm.fshl.i8(i8 %a5 , i8 %b5 , i8 %c5 )
+  %r6  = call i8 @llvm.fshl.i8(i8 %a6 , i8 %b6 , i8 %c6 )
+  %r7  = call i8 @llvm.fshl.i8(i8 %a7 , i8 %b7 , i8 %c7 )
+  %r8  = call i8 @llvm.fshl.i8(i8 %a8 , i8 %b8 , i8 %c8 )
+  %r9  = call i8 @llvm.fshl.i8(i8 %a9 , i8 %b9 , i8 %c9 )
+  %r10 = call i8 @llvm.fshl.i8(i8 %a10, i8 %b10, i8 %c10)
+  %r11 = call i8 @llvm.fshl.i8(i8 %a11, i8 %b11, i8 %c11)
+  %r12 = call i8 @llvm.fshl.i8(i8 %a12, i8 %b12, i8 %c12)
+  %r13 = call i8 @llvm.fshl.i8(i8 %a13, i8 %b13, i8 %c13)
+  %r14 = call i8 @llvm.fshl.i8(i8 %a14, i8 %b14, i8 %c14)
+  %r15 = call i8 @llvm.fshl.i8(i8 %a15, i8 %b15, i8 %c15)
+  %r16 = call i8 @llvm.fshl.i8(i8 %a16, i8 %b16, i8 %c16)
+  %r17 = call i8 @llvm.fshl.i8(i8 %a17, i8 %b17, i8 %c17)
+  %r18 = call i8 @llvm.fshl.i8(i8 %a18, i8 %b18, i8 %c18)
+  %r19 = call i8 @llvm.fshl.i8(i8 %a19, i8 %b19, i8 %c19)
+  %r20 = call i8 @llvm.fshl.i8(i8 %a20, i8 %b20, i8 %c20)
+  %r21 = call i8 @llvm.fshl.i8(i8 %a21, i8 %b21, i8 %c21)
+  %r22 = call i8 @llvm.fshl.i8(i8 %a22, i8 %b22, i8 %c22)
+  %r23 = call i8 @llvm.fshl.i8(i8 %a23, i8 %b23, i8 %c23)
+  %r24 = call i8 @llvm.fshl.i8(i8 %a24, i8 %b24, i8 %c24)
+  %r25 = call i8 @llvm.fshl.i8(i8 %a25, i8 %b25, i8 %c25)
+  %r26 = call i8 @llvm.fshl.i8(i8 %a26, i8 %b26, i8 %c26)
+  %r27 = call i8 @llvm.fshl.i8(i8 %a27, i8 %b27, i8 %c27)
+  %r28 = call i8 @llvm.fshl.i8(i8 %a28, i8 %b28, i8 %c28)
+  %r29 = call i8 @llvm.fshl.i8(i8 %a29, i8 %b29, i8 %c29)
+  %r30 = call i8 @llvm.fshl.i8(i8 %a30, i8 %b30, i8 %c30)
+  %r31 = call i8 @llvm.fshl.i8(i8 %a31, i8 %b31, i8 %c31)
+  %r32 = call i8 @llvm.fshl.i8(i8 %a32, i8 %b32, i8 %c32)
+  %r33 = call i8 @llvm.fshl.i8(i8 %a33, i8 %b33, i8 %c33)
+  %r34 = call i8 @llvm.fshl.i8(i8 %a34, i8 %b34, i8 %c34)
+  %r35 = call i8 @llvm.fshl.i8(i8 %a35, i8 %b35, i8 %c35)
+  %r36 = call i8 @llvm.fshl.i8(i8 %a36, i8 %b36, i8 %c36)
+  %r37 = call i8 @llvm.fshl.i8(i8 %a37, i8 %b37, i8 %c37)
+  %r38 = call i8 @llvm.fshl.i8(i8 %a38, i8 %b38, i8 %c38)
+  %r39 = call i8 @llvm.fshl.i8(i8 %a39, i8 %b39, i8 %c39)
+  %r40 = call i8 @llvm.fshl.i8(i8 %a40, i8 %b40, i8 %c40)
+  %r41 = call i8 @llvm.fshl.i8(i8 %a41, i8 %b41, i8 %c41)
+  %r42 = call i8 @llvm.fshl.i8(i8 %a42, i8 %b42, i8 %c42)
+  %r43 = call i8 @llvm.fshl.i8(i8 %a43, i8 %b43, i8 %c43)
+  %r44 = call i8 @llvm.fshl.i8(i8 %a44, i8 %b44, i8 %c44)
+  %r45 = call i8 @llvm.fshl.i8(i8 %a45, i8 %b45, i8 %c45)
+  %r46 = call i8 @llvm.fshl.i8(i8 %a46, i8 %b46, i8 %c46)
+  %r47 = call i8 @llvm.fshl.i8(i8 %a47, i8 %b47, i8 %c47)
+  %r48 = call i8 @llvm.fshl.i8(i8 %a48, i8 %b48, i8 %c48)
+  %r49 = call i8 @llvm.fshl.i8(i8 %a49, i8 %b49, i8 %c49)
+  %r50 = call i8 @llvm.fshl.i8(i8 %a50, i8 %b50, i8 %c50)
+  %r51 = call i8 @llvm.fshl.i8(i8 %a51, i8 %b51, i8 %c51)
+  %r52 = call i8 @llvm.fshl.i8(i8 %a52, i8 %b52, i8 %c52)
+  %r53 = call i8 @llvm.fshl.i8(i8 %a53, i8 %b53, i8 %c53)
+  %r54 = call i8 @llvm.fshl.i8(i8 %a54, i8 %b54, i8 %c54)
+  %r55 = call i8 @llvm.fshl.i8(i8 %a55, i8 %b55, i8 %c55)
+  %r56 = call i8 @llvm.fshl.i8(i8 %a56, i8 %b56, i8 %c56)
+  %r57 = call i8 @llvm.fshl.i8(i8 %a57, i8 %b57, i8 %c57)
+  %r58 = call i8 @llvm.fshl.i8(i8 %a58, i8 %b58, i8 %c58)
+  %r59 = call i8 @llvm.fshl.i8(i8 %a59, i8 %b59, i8 %c59)
+  %r60 = call i8 @llvm.fshl.i8(i8 %a60, i8 %b60, i8 %c60)
+  %r61 = call i8 @llvm.fshl.i8(i8 %a61, i8 %b61, i8 %c61)
+  %r62 = call i8 @llvm.fshl.i8(i8 %a62, i8 %b62, i8 %c62)
+  %r63 = call i8 @llvm.fshl.i8(i8 %a63, i8 %b63, i8 %c63)
+  store i8 %r0 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 0 ), align 1
+  store i8 %r1 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 1 ), align 1
+  store i8 %r2 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 2 ), align 1
+  store i8 %r3 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 3 ), align 1
+  store i8 %r4 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 4 ), align 1
+  store i8 %r5 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 5 ), align 1
+  store i8 %r6 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 6 ), align 1
+  store i8 %r7 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 7 ), align 1
+  store i8 %r8 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 8 ), align 1
+  store i8 %r9 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 9 ), align 1
+  store i8 %r10, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 10), align 1
+  store i8 %r11, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 11), align 1
+  store i8 %r12, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 12), align 1
+  store i8 %r13, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 13), align 1
+  store i8 %r14, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 14), align 1
+  store i8 %r15, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 15), align 1
+  store i8 %r16, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 16), align 1
+  store i8 %r17, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 17), align 1
+  store i8 %r18, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 18), align 1
+  store i8 %r19, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 19), align 1
+  store i8 %r20, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 20), align 1
+  store i8 %r21, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 21), align 1
+  store i8 %r22, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 22), align 1
+  store i8 %r23, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 23), align 1
+  store i8 %r24, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 24), align 1
+  store i8 %r25, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 25), align 1
+  store i8 %r26, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 26), align 1
+  store i8 %r27, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 27), align 1
+  store i8 %r28, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 28), align 1
+  store i8 %r29, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 29), align 1
+  store i8 %r30, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 30), align 1
+  store i8 %r31, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 31), align 1
+  store i8 %r32, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
+  store i8 %r33, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 33), align 1
+  store i8 %r34, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 34), align 1
+  store i8 %r35, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 35), align 1
+  store i8 %r36, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 36), align 1
+  store i8 %r37, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 37), align 1
+  store i8 %r38, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 38), align 1
+  store i8 %r39, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 39), align 1
+  store i8 %r40, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 40), align 1
+  store i8 %r41, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 41), align 1
+  store i8 %r42, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 42), align 1
+  store i8 %r43, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 43), align 1
+  store i8 %r44, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 44), align 1
+  store i8 %r45, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 45), align 1
+  store i8 %r46, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 46), align 1
+  store i8 %r47, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 47), align 1
+  store i8 %r48, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 48), align 1
+  store i8 %r49, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 49), align 1
+  store i8 %r50, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 50), align 1
+  store i8 %r51, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 51), align 1
+  store i8 %r52, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 52), align 1
+  store i8 %r53, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 53), align 1
+  store i8 %r54, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 54), align 1
+  store i8 %r55, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 55), align 1
+  store i8 %r56, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 56), align 1
+  store i8 %r57, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 57), align 1
+  store i8 %r58, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 58), align 1
+  store i8 %r59, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 59), align 1
+  store i8 %r60, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 60), align 1
+  store i8 %r61, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 61), align 1
+  store i8 %r62, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 62), align 1
+  store i8 %r63, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 63), align 1
+  ret void
+}

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr-rot.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr-rot.ll
new file mode 100644
index 00000000000000..7f8b3bc0f40e03
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr-rot.ll
@@ -0,0 +1,853 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mtriple=x86_64-unknown -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=SSE
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=slm -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=SSE
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -mattr=+prefer-256-bit -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX,AVX256
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -mattr=-prefer-256-bit -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX512
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=knl -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX512
+
+@a64 = common global [8 x i64] zeroinitializer, align 64
+@b64 = common global [8 x i64] zeroinitializer, align 64
+@c64 = common global [8 x i64] zeroinitializer, align 64
+@d64 = common global [8 x i64] zeroinitializer, align 64
+@a32 = common global [16 x i32] zeroinitializer, align 64
+@b32 = common global [16 x i32] zeroinitializer, align 64
+@c32 = common global [16 x i32] zeroinitializer, align 64
+@d32 = common global [16 x i32] zeroinitializer, align 64
+@a16 = common global [32 x i16] zeroinitializer, align 64
+@b16 = common global [32 x i16] zeroinitializer, align 64
+@c16 = common global [32 x i16] zeroinitializer, align 64
+@d16 = common global [32 x i16] zeroinitializer, align 64
+@a8  = common global [64 x i8] zeroinitializer, align 64
+@b8  = common global [64 x i8] zeroinitializer, align 64
+@c8  = common global [64 x i8] zeroinitializer, align 64
+@d8  = common global [64 x i8] zeroinitializer, align 64
+
+declare i64 @llvm.fshr.i64(i64, i64, i64)
+declare i32 @llvm.fshr.i32(i32, i32, i32)
+declare i16 @llvm.fshr.i16(i16, i16, i16)
+declare i8  @llvm.fshr.i8 (i8 , i8 , i8 )
+
+define void @fshr_v8i64() {
+; SSE-LABEL: @fshr_v8i64(
+; SSE-NEXT:    [[A0:%.*]] = load i64, ptr @a64, align 8
+; SSE-NEXT:    [[A1:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 1), align 8
+; SSE-NEXT:    [[A2:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 2), align 8
+; SSE-NEXT:    [[A3:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 3), align 8
+; SSE-NEXT:    [[A4:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 4), align 8
+; SSE-NEXT:    [[A5:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 5), align 8
+; SSE-NEXT:    [[A6:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 6), align 8
+; SSE-NEXT:    [[A7:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 7), align 8
+; SSE-NEXT:    [[B0:%.*]] = load i64, ptr @b64, align 8
+; SSE-NEXT:    [[B1:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 1), align 8
+; SSE-NEXT:    [[B2:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 2), align 8
+; SSE-NEXT:    [[B3:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 3), align 8
+; SSE-NEXT:    [[B4:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 4), align 8
+; SSE-NEXT:    [[B5:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 5), align 8
+; SSE-NEXT:    [[B6:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 6), align 8
+; SSE-NEXT:    [[B7:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 7), align 8
+; SSE-NEXT:    [[R0:%.*]] = call i64 @llvm.fshr.i64(i64 [[A0]], i64 [[A0]], i64 [[B0]])
+; SSE-NEXT:    [[R1:%.*]] = call i64 @llvm.fshr.i64(i64 [[A1]], i64 [[A1]], i64 [[B1]])
+; SSE-NEXT:    [[R2:%.*]] = call i64 @llvm.fshr.i64(i64 [[A2]], i64 [[A2]], i64 [[B2]])
+; SSE-NEXT:    [[R3:%.*]] = call i64 @llvm.fshr.i64(i64 [[A3]], i64 [[A3]], i64 [[B3]])
+; SSE-NEXT:    [[R4:%.*]] = call i64 @llvm.fshr.i64(i64 [[A4]], i64 [[A4]], i64 [[B4]])
+; SSE-NEXT:    [[R5:%.*]] = call i64 @llvm.fshr.i64(i64 [[A5]], i64 [[A5]], i64 [[B5]])
+; SSE-NEXT:    [[R6:%.*]] = call i64 @llvm.fshr.i64(i64 [[A6]], i64 [[A6]], i64 [[B6]])
+; SSE-NEXT:    [[R7:%.*]] = call i64 @llvm.fshr.i64(i64 [[A7]], i64 [[A7]], i64 [[B7]])
+; SSE-NEXT:    store i64 [[R0]], ptr @d64, align 8
+; SSE-NEXT:    store i64 [[R1]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 1), align 8
+; SSE-NEXT:    store i64 [[R2]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 2), align 8
+; SSE-NEXT:    store i64 [[R3]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 3), align 8
+; SSE-NEXT:    store i64 [[R4]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 4), align 8
+; SSE-NEXT:    store i64 [[R5]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 5), align 8
+; SSE-NEXT:    store i64 [[R6]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 6), align 8
+; SSE-NEXT:    store i64 [[R7]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 7), align 8
+; SSE-NEXT:    ret void
+;
+; AVX1-LABEL: @fshr_v8i64(
+; AVX1-NEXT:    [[A0:%.*]] = load i64, ptr @a64, align 8
+; AVX1-NEXT:    [[A1:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 1), align 8
+; AVX1-NEXT:    [[A2:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 2), align 8
+; AVX1-NEXT:    [[A3:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 3), align 8
+; AVX1-NEXT:    [[A4:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 4), align 8
+; AVX1-NEXT:    [[A5:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 5), align 8
+; AVX1-NEXT:    [[A6:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 6), align 8
+; AVX1-NEXT:    [[A7:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 7), align 8
+; AVX1-NEXT:    [[B0:%.*]] = load i64, ptr @b64, align 8
+; AVX1-NEXT:    [[B1:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 1), align 8
+; AVX1-NEXT:    [[B2:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 2), align 8
+; AVX1-NEXT:    [[B3:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 3), align 8
+; AVX1-NEXT:    [[B4:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 4), align 8
+; AVX1-NEXT:    [[B5:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 5), align 8
+; AVX1-NEXT:    [[B6:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 6), align 8
+; AVX1-NEXT:    [[B7:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 7), align 8
+; AVX1-NEXT:    [[R0:%.*]] = call i64 @llvm.fshr.i64(i64 [[A0]], i64 [[A0]], i64 [[B0]])
+; AVX1-NEXT:    [[R1:%.*]] = call i64 @llvm.fshr.i64(i64 [[A1]], i64 [[A1]], i64 [[B1]])
+; AVX1-NEXT:    [[R2:%.*]] = call i64 @llvm.fshr.i64(i64 [[A2]], i64 [[A2]], i64 [[B2]])
+; AVX1-NEXT:    [[R3:%.*]] = call i64 @llvm.fshr.i64(i64 [[A3]], i64 [[A3]], i64 [[B3]])
+; AVX1-NEXT:    [[R4:%.*]] = call i64 @llvm.fshr.i64(i64 [[A4]], i64 [[A4]], i64 [[B4]])
+; AVX1-NEXT:    [[R5:%.*]] = call i64 @llvm.fshr.i64(i64 [[A5]], i64 [[A5]], i64 [[B5]])
+; AVX1-NEXT:    [[R6:%.*]] = call i64 @llvm.fshr.i64(i64 [[A6]], i64 [[A6]], i64 [[B6]])
+; AVX1-NEXT:    [[R7:%.*]] = call i64 @llvm.fshr.i64(i64 [[A7]], i64 [[A7]], i64 [[B7]])
+; AVX1-NEXT:    store i64 [[R0]], ptr @d64, align 8
+; AVX1-NEXT:    store i64 [[R1]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 1), align 8
+; AVX1-NEXT:    store i64 [[R2]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 2), align 8
+; AVX1-NEXT:    store i64 [[R3]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 3), align 8
+; AVX1-NEXT:    store i64 [[R4]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 4), align 8
+; AVX1-NEXT:    store i64 [[R5]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 5), align 8
+; AVX1-NEXT:    store i64 [[R6]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 6), align 8
+; AVX1-NEXT:    store i64 [[R7]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 7), align 8
+; AVX1-NEXT:    ret void
+;
+; AVX2-LABEL: @fshr_v8i64(
+; AVX2-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @a64, align 8
+; AVX2-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr @b64, align 8
+; AVX2-NEXT:    [[TMP3:%.*]] = call <4 x i64> @llvm.fshr.v4i64(<4 x i64> [[TMP1]], <4 x i64> [[TMP1]], <4 x i64> [[TMP2]])
+; AVX2-NEXT:    store <4 x i64> [[TMP3]], ptr @d64, align 8
+; AVX2-NEXT:    [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 4), align 8
+; AVX2-NEXT:    [[TMP5:%.*]] = load <4 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 4), align 8
+; AVX2-NEXT:    [[TMP6:%.*]] = call <4 x i64> @llvm.fshr.v4i64(<4 x i64> [[TMP4]], <4 x i64> [[TMP4]], <4 x i64> [[TMP5]])
+; AVX2-NEXT:    store <4 x i64> [[TMP6]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 4), align 8
+; AVX2-NEXT:    ret void
+;
+; AVX256-LABEL: @fshr_v8i64(
+; AVX256-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @a64, align 8
+; AVX256-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr @b64, align 8
+; AVX256-NEXT:    [[TMP3:%.*]] = call <4 x i64> @llvm.fshr.v4i64(<4 x i64> [[TMP1]], <4 x i64> [[TMP1]], <4 x i64> [[TMP2]])
+; AVX256-NEXT:    store <4 x i64> [[TMP3]], ptr @d64, align 8
+; AVX256-NEXT:    [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 4), align 8
+; AVX256-NEXT:    [[TMP5:%.*]] = load <4 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 4), align 8
+; AVX256-NEXT:    [[TMP6:%.*]] = call <4 x i64> @llvm.fshr.v4i64(<4 x i64> [[TMP4]], <4 x i64> [[TMP4]], <4 x i64> [[TMP5]])
+; AVX256-NEXT:    store <4 x i64> [[TMP6]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 4), align 8
+; AVX256-NEXT:    ret void
+;
+; AVX512-LABEL: @fshr_v8i64(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <8 x i64>, ptr @a64, align 8
+; AVX512-NEXT:    [[TMP2:%.*]] = load <8 x i64>, ptr @b64, align 8
+; AVX512-NEXT:    [[TMP3:%.*]] = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i64> [[TMP2]])
+; AVX512-NEXT:    store <8 x i64> [[TMP3]], ptr @d64, align 8
+; AVX512-NEXT:    ret void
+;
+  %a0 = load i64, ptr @a64, align 8
+  %a1 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 1), align 8
+  %a2 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 2), align 8
+  %a3 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 3), align 8
+  %a4 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 4), align 8
+  %a5 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 5), align 8
+  %a6 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 6), align 8
+  %a7 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 7), align 8
+  %b0 = load i64, ptr @b64, align 8
+  %b1 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 1), align 8
+  %b2 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 2), align 8
+  %b3 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 3), align 8
+  %b4 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 4), align 8
+  %b5 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 5), align 8
+  %b6 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 6), align 8
+  %b7 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 7), align 8
+  %r0 = call i64 @llvm.fshr.i64(i64 %a0, i64 %a0, i64 %b0)
+  %r1 = call i64 @llvm.fshr.i64(i64 %a1, i64 %a1, i64 %b1)
+  %r2 = call i64 @llvm.fshr.i64(i64 %a2, i64 %a2, i64 %b2)
+  %r3 = call i64 @llvm.fshr.i64(i64 %a3, i64 %a3, i64 %b3)
+  %r4 = call i64 @llvm.fshr.i64(i64 %a4, i64 %a4, i64 %b4)
+  %r5 = call i64 @llvm.fshr.i64(i64 %a5, i64 %a5, i64 %b5)
+  %r6 = call i64 @llvm.fshr.i64(i64 %a6, i64 %a6, i64 %b6)
+  %r7 = call i64 @llvm.fshr.i64(i64 %a7, i64 %a7, i64 %b7)
+  store i64 %r0, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 0), align 8
+  store i64 %r1, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 1), align 8
+  store i64 %r2, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 2), align 8
+  store i64 %r3, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 3), align 8
+  store i64 %r4, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 4), align 8
+  store i64 %r5, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 5), align 8
+  store i64 %r6, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 6), align 8
+  store i64 %r7, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 7), align 8
+  ret void
+}
+
+define void @fshr_v16i32() {
+; SSE-LABEL: @fshr_v16i32(
+; SSE-NEXT:    [[A0:%.*]] = load i32, ptr @a32, align 4
+; SSE-NEXT:    [[A1:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 1), align 4
+; SSE-NEXT:    [[A2:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 2), align 4
+; SSE-NEXT:    [[A3:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 3), align 4
+; SSE-NEXT:    [[A4:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 4), align 4
+; SSE-NEXT:    [[A5:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 5), align 4
+; SSE-NEXT:    [[A6:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 6), align 4
+; SSE-NEXT:    [[A7:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 7), align 4
+; SSE-NEXT:    [[A8:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 8), align 4
+; SSE-NEXT:    [[A9:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 9), align 4
+; SSE-NEXT:    [[A10:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 10), align 4
+; SSE-NEXT:    [[A11:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 11), align 4
+; SSE-NEXT:    [[A12:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 12), align 4
+; SSE-NEXT:    [[A13:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 13), align 4
+; SSE-NEXT:    [[A14:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 14), align 4
+; SSE-NEXT:    [[A15:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 15), align 4
+; SSE-NEXT:    [[B0:%.*]] = load i32, ptr @b32, align 4
+; SSE-NEXT:    [[B1:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 1), align 4
+; SSE-NEXT:    [[B2:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 2), align 4
+; SSE-NEXT:    [[B3:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 3), align 4
+; SSE-NEXT:    [[B4:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 4), align 4
+; SSE-NEXT:    [[B5:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 5), align 4
+; SSE-NEXT:    [[B6:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 6), align 4
+; SSE-NEXT:    [[B7:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 7), align 4
+; SSE-NEXT:    [[B8:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 8), align 4
+; SSE-NEXT:    [[B9:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 9), align 4
+; SSE-NEXT:    [[B10:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 10), align 4
+; SSE-NEXT:    [[B11:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 11), align 4
+; SSE-NEXT:    [[B12:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 12), align 4
+; SSE-NEXT:    [[B13:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 13), align 4
+; SSE-NEXT:    [[B14:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 14), align 4
+; SSE-NEXT:    [[B15:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 15), align 4
+; SSE-NEXT:    [[R0:%.*]] = call i32 @llvm.fshr.i32(i32 [[A0]], i32 [[A0]], i32 [[B0]])
+; SSE-NEXT:    [[R1:%.*]] = call i32 @llvm.fshr.i32(i32 [[A1]], i32 [[A1]], i32 [[B1]])
+; SSE-NEXT:    [[R2:%.*]] = call i32 @llvm.fshr.i32(i32 [[A2]], i32 [[A2]], i32 [[B2]])
+; SSE-NEXT:    [[R3:%.*]] = call i32 @llvm.fshr.i32(i32 [[A3]], i32 [[A3]], i32 [[B3]])
+; SSE-NEXT:    [[R4:%.*]] = call i32 @llvm.fshr.i32(i32 [[A4]], i32 [[A4]], i32 [[B4]])
+; SSE-NEXT:    [[R5:%.*]] = call i32 @llvm.fshr.i32(i32 [[A5]], i32 [[A5]], i32 [[B5]])
+; SSE-NEXT:    [[R6:%.*]] = call i32 @llvm.fshr.i32(i32 [[A6]], i32 [[A6]], i32 [[B6]])
+; SSE-NEXT:    [[R7:%.*]] = call i32 @llvm.fshr.i32(i32 [[A7]], i32 [[A7]], i32 [[B7]])
+; SSE-NEXT:    [[R8:%.*]] = call i32 @llvm.fshr.i32(i32 [[A8]], i32 [[A8]], i32 [[B8]])
+; SSE-NEXT:    [[R9:%.*]] = call i32 @llvm.fshr.i32(i32 [[A9]], i32 [[A9]], i32 [[B9]])
+; SSE-NEXT:    [[R10:%.*]] = call i32 @llvm.fshr.i32(i32 [[A10]], i32 [[A10]], i32 [[B10]])
+; SSE-NEXT:    [[R11:%.*]] = call i32 @llvm.fshr.i32(i32 [[A11]], i32 [[A11]], i32 [[B11]])
+; SSE-NEXT:    [[R12:%.*]] = call i32 @llvm.fshr.i32(i32 [[A12]], i32 [[A12]], i32 [[B12]])
+; SSE-NEXT:    [[R13:%.*]] = call i32 @llvm.fshr.i32(i32 [[A13]], i32 [[A13]], i32 [[B13]])
+; SSE-NEXT:    [[R14:%.*]] = call i32 @llvm.fshr.i32(i32 [[A14]], i32 [[A14]], i32 [[B14]])
+; SSE-NEXT:    [[R15:%.*]] = call i32 @llvm.fshr.i32(i32 [[A15]], i32 [[A15]], i32 [[B15]])
+; SSE-NEXT:    store i32 [[R0]], ptr @d32, align 4
+; SSE-NEXT:    store i32 [[R1]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 1), align 4
+; SSE-NEXT:    store i32 [[R2]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 2), align 4
+; SSE-NEXT:    store i32 [[R3]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 3), align 4
+; SSE-NEXT:    store i32 [[R4]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 4), align 4
+; SSE-NEXT:    store i32 [[R5]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 5), align 4
+; SSE-NEXT:    store i32 [[R6]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 6), align 4
+; SSE-NEXT:    store i32 [[R7]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 7), align 4
+; SSE-NEXT:    store i32 [[R8]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 8), align 4
+; SSE-NEXT:    store i32 [[R9]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 9), align 4
+; SSE-NEXT:    store i32 [[R10]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 10), align 4
+; SSE-NEXT:    store i32 [[R11]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 11), align 4
+; SSE-NEXT:    store i32 [[R12]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 12), align 4
+; SSE-NEXT:    store i32 [[R13]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 13), align 4
+; SSE-NEXT:    store i32 [[R14]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 14), align 4
+; SSE-NEXT:    store i32 [[R15]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 15), align 4
+; SSE-NEXT:    ret void
+;
+; AVX-LABEL: @fshr_v16i32(
+; AVX-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @a32, align 4
+; AVX-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr @b32, align 4
+; AVX-NEXT:    [[TMP3:%.*]] = call <8 x i32> @llvm.fshr.v8i32(<8 x i32> [[TMP1]], <8 x i32> [[TMP1]], <8 x i32> [[TMP2]])
+; AVX-NEXT:    store <8 x i32> [[TMP3]], ptr @d32, align 4
+; AVX-NEXT:    [[TMP4:%.*]] = load <8 x i32>, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 8), align 4
+; AVX-NEXT:    [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 8), align 4
+; AVX-NEXT:    [[TMP6:%.*]] = call <8 x i32> @llvm.fshr.v8i32(<8 x i32> [[TMP4]], <8 x i32> [[TMP4]], <8 x i32> [[TMP5]])
+; AVX-NEXT:    store <8 x i32> [[TMP6]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 8), align 4
+; AVX-NEXT:    ret void
+;
+; AVX512-LABEL: @fshr_v16i32(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <16 x i32>, ptr @a32, align 4
+; AVX512-NEXT:    [[TMP2:%.*]] = load <16 x i32>, ptr @b32, align 4
+; AVX512-NEXT:    [[TMP3:%.*]] = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> [[TMP2]])
+; AVX512-NEXT:    store <16 x i32> [[TMP3]], ptr @d32, align 4
+; AVX512-NEXT:    ret void
+;
+  %a0  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 0 ), align 4
+  %a1  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 1 ), align 4
+  %a2  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 2 ), align 4
+  %a3  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 3 ), align 4
+  %a4  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 4 ), align 4
+  %a5  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 5 ), align 4
+  %a6  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 6 ), align 4
+  %a7  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 7 ), align 4
+  %a8  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 8 ), align 4
+  %a9  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 9 ), align 4
+  %a10 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 10), align 4
+  %a11 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 11), align 4
+  %a12 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 12), align 4
+  %a13 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 13), align 4
+  %a14 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 14), align 4
+  %a15 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 15), align 4
+  %b0  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 0 ), align 4
+  %b1  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 1 ), align 4
+  %b2  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 2 ), align 4
+  %b3  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 3 ), align 4
+  %b4  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 4 ), align 4
+  %b5  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 5 ), align 4
+  %b6  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 6 ), align 4
+  %b7  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 7 ), align 4
+  %b8  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 8 ), align 4
+  %b9  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 9 ), align 4
+  %b10 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 10), align 4
+  %b11 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 11), align 4
+  %b12 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 12), align 4
+  %b13 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 13), align 4
+  %b14 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 14), align 4
+  %b15 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 15), align 4
+  %r0  = call i32 @llvm.fshr.i32(i32 %a0 , i32 %a0 , i32 %b0 )
+  %r1  = call i32 @llvm.fshr.i32(i32 %a1 , i32 %a1 , i32 %b1 )
+  %r2  = call i32 @llvm.fshr.i32(i32 %a2 , i32 %a2 , i32 %b2 )
+  %r3  = call i32 @llvm.fshr.i32(i32 %a3 , i32 %a3 , i32 %b3 )
+  %r4  = call i32 @llvm.fshr.i32(i32 %a4 , i32 %a4 , i32 %b4 )
+  %r5  = call i32 @llvm.fshr.i32(i32 %a5 , i32 %a5 , i32 %b5 )
+  %r6  = call i32 @llvm.fshr.i32(i32 %a6 , i32 %a6 , i32 %b6 )
+  %r7  = call i32 @llvm.fshr.i32(i32 %a7 , i32 %a7 , i32 %b7 )
+  %r8  = call i32 @llvm.fshr.i32(i32 %a8 , i32 %a8 , i32 %b8 )
+  %r9  = call i32 @llvm.fshr.i32(i32 %a9 , i32 %a9 , i32 %b9 )
+  %r10 = call i32 @llvm.fshr.i32(i32 %a10, i32 %a10, i32 %b10)
+  %r11 = call i32 @llvm.fshr.i32(i32 %a11, i32 %a11, i32 %b11)
+  %r12 = call i32 @llvm.fshr.i32(i32 %a12, i32 %a12, i32 %b12)
+  %r13 = call i32 @llvm.fshr.i32(i32 %a13, i32 %a13, i32 %b13)
+  %r14 = call i32 @llvm.fshr.i32(i32 %a14, i32 %a14, i32 %b14)
+  %r15 = call i32 @llvm.fshr.i32(i32 %a15, i32 %a15, i32 %b15)
+  store i32 %r0 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 0 ), align 4
+  store i32 %r1 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 1 ), align 4
+  store i32 %r2 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 2 ), align 4
+  store i32 %r3 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 3 ), align 4
+  store i32 %r4 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 4 ), align 4
+  store i32 %r5 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 5 ), align 4
+  store i32 %r6 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 6 ), align 4
+  store i32 %r7 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 7 ), align 4
+  store i32 %r8 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 8 ), align 4
+  store i32 %r9 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 9 ), align 4
+  store i32 %r10, ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 10), align 4
+  store i32 %r11, ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 11), align 4
+  store i32 %r12, ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 12), align 4
+  store i32 %r13, ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 13), align 4
+  store i32 %r14, ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 14), align 4
+  store i32 %r15, ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 15), align 4
+  ret void
+}
+
+define void @fshr_v32i16() {
+; AVX-LABEL: @fshr_v32i16(
+; AVX-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr @a16, align 2
+; AVX-NEXT:    [[TMP2:%.*]] = load <16 x i16>, ptr @b16, align 2
+; AVX-NEXT:    [[TMP3:%.*]] = call <16 x i16> @llvm.fshr.v16i16(<16 x i16> [[TMP1]], <16 x i16> [[TMP1]], <16 x i16> [[TMP2]])
+; AVX-NEXT:    store <16 x i16> [[TMP3]], ptr @d16, align 2
+; AVX-NEXT:    [[TMP4:%.*]] = load <16 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 16), align 2
+; AVX-NEXT:    [[TMP5:%.*]] = load <16 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 16), align 2
+; AVX-NEXT:    [[TMP6:%.*]] = call <16 x i16> @llvm.fshr.v16i16(<16 x i16> [[TMP4]], <16 x i16> [[TMP4]], <16 x i16> [[TMP5]])
+; AVX-NEXT:    store <16 x i16> [[TMP6]], ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 16), align 2
+; AVX-NEXT:    ret void
+;
+; AVX512-LABEL: @fshr_v32i16(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <32 x i16>, ptr @a16, align 2
+; AVX512-NEXT:    [[TMP2:%.*]] = load <32 x i16>, ptr @b16, align 2
+; AVX512-NEXT:    [[TMP3:%.*]] = call <32 x i16> @llvm.fshr.v32i16(<32 x i16> [[TMP1]], <32 x i16> [[TMP1]], <32 x i16> [[TMP2]])
+; AVX512-NEXT:    store <32 x i16> [[TMP3]], ptr @d16, align 2
+; AVX512-NEXT:    ret void
+;
+  %a0  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 0 ), align 2
+  %a1  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 1 ), align 2
+  %a2  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 2 ), align 2
+  %a3  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 3 ), align 2
+  %a4  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 4 ), align 2
+  %a5  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 5 ), align 2
+  %a6  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 6 ), align 2
+  %a7  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 7 ), align 2
+  %a8  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 8 ), align 2
+  %a9  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 9 ), align 2
+  %a10 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 10), align 2
+  %a11 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 11), align 2
+  %a12 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 12), align 2
+  %a13 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 13), align 2
+  %a14 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 14), align 2
+  %a15 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 15), align 2
+  %a16 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 16), align 2
+  %a17 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 17), align 2
+  %a18 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 18), align 2
+  %a19 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 19), align 2
+  %a20 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 20), align 2
+  %a21 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 21), align 2
+  %a22 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 22), align 2
+  %a23 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 23), align 2
+  %a24 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 24), align 2
+  %a25 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 25), align 2
+  %a26 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 26), align 2
+  %a27 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 27), align 2
+  %a28 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 28), align 2
+  %a29 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 29), align 2
+  %a30 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 30), align 2
+  %a31 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 31), align 2
+  %b0  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 0 ), align 2
+  %b1  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 1 ), align 2
+  %b2  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 2 ), align 2
+  %b3  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 3 ), align 2
+  %b4  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 4 ), align 2
+  %b5  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 5 ), align 2
+  %b6  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 6 ), align 2
+  %b7  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 7 ), align 2
+  %b8  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 8 ), align 2
+  %b9  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 9 ), align 2
+  %b10 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 10), align 2
+  %b11 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 11), align 2
+  %b12 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 12), align 2
+  %b13 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 13), align 2
+  %b14 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 14), align 2
+  %b15 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 15), align 2
+  %b16 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 16), align 2
+  %b17 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 17), align 2
+  %b18 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 18), align 2
+  %b19 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 19), align 2
+  %b20 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 20), align 2
+  %b21 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 21), align 2
+  %b22 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 22), align 2
+  %b23 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 23), align 2
+  %b24 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 24), align 2
+  %b25 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 25), align 2
+  %b26 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 26), align 2
+  %b27 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 27), align 2
+  %b28 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 28), align 2
+  %b29 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 29), align 2
+  %b30 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 30), align 2
+  %b31 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 31), align 2
+  %r0  = call i16 @llvm.fshr.i16(i16 %a0 , i16 %a0 , i16 %b0 )
+  %r1  = call i16 @llvm.fshr.i16(i16 %a1 , i16 %a1 , i16 %b1 )
+  %r2  = call i16 @llvm.fshr.i16(i16 %a2 , i16 %a2 , i16 %b2 )
+  %r3  = call i16 @llvm.fshr.i16(i16 %a3 , i16 %a3 , i16 %b3 )
+  %r4  = call i16 @llvm.fshr.i16(i16 %a4 , i16 %a4 , i16 %b4 )
+  %r5  = call i16 @llvm.fshr.i16(i16 %a5 , i16 %a5 , i16 %b5 )
+  %r6  = call i16 @llvm.fshr.i16(i16 %a6 , i16 %a6 , i16 %b6 )
+  %r7  = call i16 @llvm.fshr.i16(i16 %a7 , i16 %a7 , i16 %b7 )
+  %r8  = call i16 @llvm.fshr.i16(i16 %a8 , i16 %a8 , i16 %b8 )
+  %r9  = call i16 @llvm.fshr.i16(i16 %a9 , i16 %a9 , i16 %b9 )
+  %r10 = call i16 @llvm.fshr.i16(i16 %a10, i16 %a10, i16 %b10)
+  %r11 = call i16 @llvm.fshr.i16(i16 %a11, i16 %a11, i16 %b11)
+  %r12 = call i16 @llvm.fshr.i16(i16 %a12, i16 %a12, i16 %b12)
+  %r13 = call i16 @llvm.fshr.i16(i16 %a13, i16 %a13, i16 %b13)
+  %r14 = call i16 @llvm.fshr.i16(i16 %a14, i16 %a14, i16 %b14)
+  %r15 = call i16 @llvm.fshr.i16(i16 %a15, i16 %a15, i16 %b15)
+  %r16 = call i16 @llvm.fshr.i16(i16 %a16, i16 %a16, i16 %b16)
+  %r17 = call i16 @llvm.fshr.i16(i16 %a17, i16 %a17, i16 %b17)
+  %r18 = call i16 @llvm.fshr.i16(i16 %a18, i16 %a18, i16 %b18)
+  %r19 = call i16 @llvm.fshr.i16(i16 %a19, i16 %a19, i16 %b19)
+  %r20 = call i16 @llvm.fshr.i16(i16 %a20, i16 %a20, i16 %b20)
+  %r21 = call i16 @llvm.fshr.i16(i16 %a21, i16 %a21, i16 %b21)
+  %r22 = call i16 @llvm.fshr.i16(i16 %a22, i16 %a22, i16 %b22)
+  %r23 = call i16 @llvm.fshr.i16(i16 %a23, i16 %a23, i16 %b23)
+  %r24 = call i16 @llvm.fshr.i16(i16 %a24, i16 %a24, i16 %b24)
+  %r25 = call i16 @llvm.fshr.i16(i16 %a25, i16 %a25, i16 %b25)
+  %r26 = call i16 @llvm.fshr.i16(i16 %a26, i16 %a26, i16 %b26)
+  %r27 = call i16 @llvm.fshr.i16(i16 %a27, i16 %a27, i16 %b27)
+  %r28 = call i16 @llvm.fshr.i16(i16 %a28, i16 %a28, i16 %b28)
+  %r29 = call i16 @llvm.fshr.i16(i16 %a29, i16 %a29, i16 %b29)
+  %r30 = call i16 @llvm.fshr.i16(i16 %a30, i16 %a30, i16 %b30)
+  %r31 = call i16 @llvm.fshr.i16(i16 %a31, i16 %a31, i16 %b31)
+  store i16 %r0 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 0 ), align 2
+  store i16 %r1 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 1 ), align 2
+  store i16 %r2 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 2 ), align 2
+  store i16 %r3 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 3 ), align 2
+  store i16 %r4 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 4 ), align 2
+  store i16 %r5 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 5 ), align 2
+  store i16 %r6 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 6 ), align 2
+  store i16 %r7 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 7 ), align 2
+  store i16 %r8 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 8 ), align 2
+  store i16 %r9 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 9 ), align 2
+  store i16 %r10, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 10), align 2
+  store i16 %r11, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 11), align 2
+  store i16 %r12, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 12), align 2
+  store i16 %r13, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 13), align 2
+  store i16 %r14, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 14), align 2
+  store i16 %r15, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 15), align 2
+  store i16 %r16, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 16), align 2
+  store i16 %r17, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 17), align 2
+  store i16 %r18, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 18), align 2
+  store i16 %r19, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 19), align 2
+  store i16 %r20, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 20), align 2
+  store i16 %r21, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 21), align 2
+  store i16 %r22, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 22), align 2
+  store i16 %r23, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 23), align 2
+  store i16 %r24, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 24), align 2
+  store i16 %r25, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 25), align 2
+  store i16 %r26, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 26), align 2
+  store i16 %r27, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 27), align 2
+  store i16 %r28, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 28), align 2
+  store i16 %r29, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 29), align 2
+  store i16 %r30, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 30), align 2
+  store i16 %r31, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 31), align 2
+  ret void
+}
+
+define void @fshr_v64i8() {
+; SSE-LABEL: @fshr_v64i8(
+; SSE-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr @a8, align 1
+; SSE-NEXT:    [[TMP2:%.*]] = load <16 x i8>, ptr @b8, align 1
+; SSE-NEXT:    [[TMP3:%.*]] = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP1]], <16 x i8> [[TMP2]])
+; SSE-NEXT:    store <16 x i8> [[TMP3]], ptr @d8, align 1
+; SSE-NEXT:    [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 16), align 1
+; SSE-NEXT:    [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 16), align 1
+; SSE-NEXT:    [[TMP6:%.*]] = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> [[TMP4]], <16 x i8> [[TMP4]], <16 x i8> [[TMP5]])
+; SSE-NEXT:    store <16 x i8> [[TMP6]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 16), align 1
+; SSE-NEXT:    [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
+; SSE-NEXT:    [[TMP8:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
+; SSE-NEXT:    [[TMP9:%.*]] = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> [[TMP7]], <16 x i8> [[TMP7]], <16 x i8> [[TMP8]])
+; SSE-NEXT:    [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
+; SSE-NEXT:    [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
+; SSE-NEXT:    [[TMP12:%.*]] = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> [[TMP10]], <16 x i8> [[TMP10]], <16 x i8> [[TMP11]])
+; SSE-NEXT:    store <16 x i8> [[TMP9]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
+; SSE-NEXT:    store <16 x i8> [[TMP12]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 48), align 1
+; SSE-NEXT:    ret void
+;
+; AVX-LABEL: @fshr_v64i8(
+; AVX-NEXT:    [[TMP1:%.*]] = load <32 x i8>, ptr @a8, align 1
+; AVX-NEXT:    [[TMP2:%.*]] = load <32 x i8>, ptr @b8, align 1
+; AVX-NEXT:    [[TMP3:%.*]] = call <32 x i8> @llvm.fshr.v32i8(<32 x i8> [[TMP1]], <32 x i8> [[TMP1]], <32 x i8> [[TMP2]])
+; AVX-NEXT:    store <32 x i8> [[TMP3]], ptr @d8, align 1
+; AVX-NEXT:    [[TMP4:%.*]] = load <32 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
+; AVX-NEXT:    [[TMP5:%.*]] = load <32 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
+; AVX-NEXT:    [[TMP6:%.*]] = call <32 x i8> @llvm.fshr.v32i8(<32 x i8> [[TMP4]], <32 x i8> [[TMP4]], <32 x i8> [[TMP5]])
+; AVX-NEXT:    store <32 x i8> [[TMP6]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
+; AVX-NEXT:    ret void
+;
+; AVX512-LABEL: @fshr_v64i8(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <64 x i8>, ptr @a8, align 1
+; AVX512-NEXT:    [[TMP2:%.*]] = load <64 x i8>, ptr @b8, align 1
+; AVX512-NEXT:    [[TMP3:%.*]] = call <64 x i8> @llvm.fshr.v64i8(<64 x i8> [[TMP1]], <64 x i8> [[TMP1]], <64 x i8> [[TMP2]])
+; AVX512-NEXT:    store <64 x i8> [[TMP3]], ptr @d8, align 1
+; AVX512-NEXT:    ret void
+;
+  %a0  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 0 ), align 1
+  %a1  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 1 ), align 1
+  %a2  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 2 ), align 1
+  %a3  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 3 ), align 1
+  %a4  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 4 ), align 1
+  %a5  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 5 ), align 1
+  %a6  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 6 ), align 1
+  %a7  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 7 ), align 1
+  %a8  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 8 ), align 1
+  %a9  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 9 ), align 1
+  %a10 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 10), align 1
+  %a11 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 11), align 1
+  %a12 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 12), align 1
+  %a13 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 13), align 1
+  %a14 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 14), align 1
+  %a15 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 15), align 1
+  %a16 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 16), align 1
+  %a17 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 17), align 1
+  %a18 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 18), align 1
+  %a19 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 19), align 1
+  %a20 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 20), align 1
+  %a21 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 21), align 1
+  %a22 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 22), align 1
+  %a23 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 23), align 1
+  %a24 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 24), align 1
+  %a25 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 25), align 1
+  %a26 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 26), align 1
+  %a27 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 27), align 1
+  %a28 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 28), align 1
+  %a29 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 29), align 1
+  %a30 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 30), align 1
+  %a31 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 31), align 1
+  %a32 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
+  %a33 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 33), align 1
+  %a34 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 34), align 1
+  %a35 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 35), align 1
+  %a36 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 36), align 1
+  %a37 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 37), align 1
+  %a38 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 38), align 1
+  %a39 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 39), align 1
+  %a40 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 40), align 1
+  %a41 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 41), align 1
+  %a42 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 42), align 1
+  %a43 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 43), align 1
+  %a44 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 44), align 1
+  %a45 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 45), align 1
+  %a46 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 46), align 1
+  %a47 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 47), align 1
+  %a48 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
+  %a49 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 49), align 1
+  %a50 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 50), align 1
+  %a51 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 51), align 1
+  %a52 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 52), align 1
+  %a53 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 53), align 1
+  %a54 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 54), align 1
+  %a55 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 55), align 1
+  %a56 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 56), align 1
+  %a57 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 57), align 1
+  %a58 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 58), align 1
+  %a59 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 59), align 1
+  %a60 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 60), align 1
+  %a61 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 61), align 1
+  %a62 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 62), align 1
+  %a63 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 63), align 1
+  %b0  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 0 ), align 1
+  %b1  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 1 ), align 1
+  %b2  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 2 ), align 1
+  %b3  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 3 ), align 1
+  %b4  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 4 ), align 1
+  %b5  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 5 ), align 1
+  %b6  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 6 ), align 1
+  %b7  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 7 ), align 1
+  %b8  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 8 ), align 1
+  %b9  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 9 ), align 1
+  %b10 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 10), align 1
+  %b11 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 11), align 1
+  %b12 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 12), align 1
+  %b13 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 13), align 1
+  %b14 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 14), align 1
+  %b15 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 15), align 1
+  %b16 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 16), align 1
+  %b17 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 17), align 1
+  %b18 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 18), align 1
+  %b19 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 19), align 1
+  %b20 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 20), align 1
+  %b21 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 21), align 1
+  %b22 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 22), align 1
+  %b23 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 23), align 1
+  %b24 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 24), align 1
+  %b25 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 25), align 1
+  %b26 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 26), align 1
+  %b27 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 27), align 1
+  %b28 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 28), align 1
+  %b29 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 29), align 1
+  %b30 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 30), align 1
+  %b31 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 31), align 1
+  %b32 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
+  %b33 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 33), align 1
+  %b34 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 34), align 1
+  %b35 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 35), align 1
+  %b36 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 36), align 1
+  %b37 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 37), align 1
+  %b38 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 38), align 1
+  %b39 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 39), align 1
+  %b40 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 40), align 1
+  %b41 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 41), align 1
+  %b42 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 42), align 1
+  %b43 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 43), align 1
+  %b44 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 44), align 1
+  %b45 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 45), align 1
+  %b46 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 46), align 1
+  %b47 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 47), align 1
+  %b48 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
+  %b49 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 49), align 1
+  %b50 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 50), align 1
+  %b51 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 51), align 1
+  %b52 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 52), align 1
+  %b53 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 53), align 1
+  %b54 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 54), align 1
+  %b55 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 55), align 1
+  %b56 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 56), align 1
+  %b57 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 57), align 1
+  %b58 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 58), align 1
+  %b59 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 59), align 1
+  %b60 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 60), align 1
+  %b61 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 61), align 1
+  %b62 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 62), align 1
+  %b63 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 63), align 1
+  %r0  = call i8 @llvm.fshr.i8(i8 %a0 , i8 %a0 , i8 %b0 )
+  %r1  = call i8 @llvm.fshr.i8(i8 %a1 , i8 %a1 , i8 %b1 )
+  %r2  = call i8 @llvm.fshr.i8(i8 %a2 , i8 %a2 , i8 %b2 )
+  %r3  = call i8 @llvm.fshr.i8(i8 %a3 , i8 %a3 , i8 %b3 )
+  %r4  = call i8 @llvm.fshr.i8(i8 %a4 , i8 %a4 , i8 %b4 )
+  %r5  = call i8 @llvm.fshr.i8(i8 %a5 , i8 %a5 , i8 %b5 )
+  %r6  = call i8 @llvm.fshr.i8(i8 %a6 , i8 %a6 , i8 %b6 )
+  %r7  = call i8 @llvm.fshr.i8(i8 %a7 , i8 %a7 , i8 %b7 )
+  %r8  = call i8 @llvm.fshr.i8(i8 %a8 , i8 %a8 , i8 %b8 )
+  %r9  = call i8 @llvm.fshr.i8(i8 %a9 , i8 %a9 , i8 %b9 )
+  %r10 = call i8 @llvm.fshr.i8(i8 %a10, i8 %a10, i8 %b10)
+  %r11 = call i8 @llvm.fshr.i8(i8 %a11, i8 %a11, i8 %b11)
+  %r12 = call i8 @llvm.fshr.i8(i8 %a12, i8 %a12, i8 %b12)
+  %r13 = call i8 @llvm.fshr.i8(i8 %a13, i8 %a13, i8 %b13)
+  %r14 = call i8 @llvm.fshr.i8(i8 %a14, i8 %a14, i8 %b14)
+  %r15 = call i8 @llvm.fshr.i8(i8 %a15, i8 %a15, i8 %b15)
+  %r16 = call i8 @llvm.fshr.i8(i8 %a16, i8 %a16, i8 %b16)
+  %r17 = call i8 @llvm.fshr.i8(i8 %a17, i8 %a17, i8 %b17)
+  %r18 = call i8 @llvm.fshr.i8(i8 %a18, i8 %a18, i8 %b18)
+  %r19 = call i8 @llvm.fshr.i8(i8 %a19, i8 %a19, i8 %b19)
+  %r20 = call i8 @llvm.fshr.i8(i8 %a20, i8 %a20, i8 %b20)
+  %r21 = call i8 @llvm.fshr.i8(i8 %a21, i8 %a21, i8 %b21)
+  %r22 = call i8 @llvm.fshr.i8(i8 %a22, i8 %a22, i8 %b22)
+  %r23 = call i8 @llvm.fshr.i8(i8 %a23, i8 %a23, i8 %b23)
+  %r24 = call i8 @llvm.fshr.i8(i8 %a24, i8 %a24, i8 %b24)
+  %r25 = call i8 @llvm.fshr.i8(i8 %a25, i8 %a25, i8 %b25)
+  %r26 = call i8 @llvm.fshr.i8(i8 %a26, i8 %a26, i8 %b26)
+  %r27 = call i8 @llvm.fshr.i8(i8 %a27, i8 %a27, i8 %b27)
+  %r28 = call i8 @llvm.fshr.i8(i8 %a28, i8 %a28, i8 %b28)
+  %r29 = call i8 @llvm.fshr.i8(i8 %a29, i8 %a29, i8 %b29)
+  %r30 = call i8 @llvm.fshr.i8(i8 %a30, i8 %a30, i8 %b30)
+  %r31 = call i8 @llvm.fshr.i8(i8 %a31, i8 %a31, i8 %b31)
+  %r32 = call i8 @llvm.fshr.i8(i8 %a32, i8 %a32, i8 %b32)
+  %r33 = call i8 @llvm.fshr.i8(i8 %a33, i8 %a33, i8 %b33)
+  %r34 = call i8 @llvm.fshr.i8(i8 %a34, i8 %a34, i8 %b34)
+  %r35 = call i8 @llvm.fshr.i8(i8 %a35, i8 %a35, i8 %b35)
+  %r36 = call i8 @llvm.fshr.i8(i8 %a36, i8 %a36, i8 %b36)
+  %r37 = call i8 @llvm.fshr.i8(i8 %a37, i8 %a37, i8 %b37)
+  %r38 = call i8 @llvm.fshr.i8(i8 %a38, i8 %a38, i8 %b38)
+  %r39 = call i8 @llvm.fshr.i8(i8 %a39, i8 %a39, i8 %b39)
+  %r40 = call i8 @llvm.fshr.i8(i8 %a40, i8 %a40, i8 %b40)
+  %r41 = call i8 @llvm.fshr.i8(i8 %a41, i8 %a41, i8 %b41)
+  %r42 = call i8 @llvm.fshr.i8(i8 %a42, i8 %a42, i8 %b42)
+  %r43 = call i8 @llvm.fshr.i8(i8 %a43, i8 %a43, i8 %b43)
+  %r44 = call i8 @llvm.fshr.i8(i8 %a44, i8 %a44, i8 %b44)
+  %r45 = call i8 @llvm.fshr.i8(i8 %a45, i8 %a45, i8 %b45)
+  %r46 = call i8 @llvm.fshr.i8(i8 %a46, i8 %a46, i8 %b46)
+  %r47 = call i8 @llvm.fshr.i8(i8 %a47, i8 %a47, i8 %b47)
+  %r48 = call i8 @llvm.fshr.i8(i8 %a48, i8 %a48, i8 %b48)
+  %r49 = call i8 @llvm.fshr.i8(i8 %a49, i8 %a49, i8 %b49)
+  %r50 = call i8 @llvm.fshr.i8(i8 %a50, i8 %a50, i8 %b50)
+  %r51 = call i8 @llvm.fshr.i8(i8 %a51, i8 %a51, i8 %b51)
+  %r52 = call i8 @llvm.fshr.i8(i8 %a52, i8 %a52, i8 %b52)
+  %r53 = call i8 @llvm.fshr.i8(i8 %a53, i8 %a53, i8 %b53)
+  %r54 = call i8 @llvm.fshr.i8(i8 %a54, i8 %a54, i8 %b54)
+  %r55 = call i8 @llvm.fshr.i8(i8 %a55, i8 %a55, i8 %b55)
+  %r56 = call i8 @llvm.fshr.i8(i8 %a56, i8 %a56, i8 %b56)
+  %r57 = call i8 @llvm.fshr.i8(i8 %a57, i8 %a57, i8 %b57)
+  %r58 = call i8 @llvm.fshr.i8(i8 %a58, i8 %a58, i8 %b58)
+  %r59 = call i8 @llvm.fshr.i8(i8 %a59, i8 %a59, i8 %b59)
+  %r60 = call i8 @llvm.fshr.i8(i8 %a60, i8 %a60, i8 %b60)
+  %r61 = call i8 @llvm.fshr.i8(i8 %a61, i8 %a61, i8 %b61)
+  %r62 = call i8 @llvm.fshr.i8(i8 %a62, i8 %a62, i8 %b62)
+  %r63 = call i8 @llvm.fshr.i8(i8 %a63, i8 %a63, i8 %b63)
+  store i8 %r0 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 0 ), align 1
+  store i8 %r1 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 1 ), align 1
+  store i8 %r2 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 2 ), align 1
+  store i8 %r3 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 3 ), align 1
+  store i8 %r4 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 4 ), align 1
+  store i8 %r5 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 5 ), align 1
+  store i8 %r6 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 6 ), align 1
+  store i8 %r7 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 7 ), align 1
+  store i8 %r8 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 8 ), align 1
+  store i8 %r9 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 9 ), align 1
+  store i8 %r10, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 10), align 1
+  store i8 %r11, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 11), align 1
+  store i8 %r12, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 12), align 1
+  store i8 %r13, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 13), align 1
+  store i8 %r14, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 14), align 1
+  store i8 %r15, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 15), align 1
+  store i8 %r16, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 16), align 1
+  store i8 %r17, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 17), align 1
+  store i8 %r18, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 18), align 1
+  store i8 %r19, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 19), align 1
+  store i8 %r20, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 20), align 1
+  store i8 %r21, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 21), align 1
+  store i8 %r22, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 22), align 1
+  store i8 %r23, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 23), align 1
+  store i8 %r24, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 24), align 1
+  store i8 %r25, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 25), align 1
+  store i8 %r26, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 26), align 1
+  store i8 %r27, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 27), align 1
+  store i8 %r28, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 28), align 1
+  store i8 %r29, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 29), align 1
+  store i8 %r30, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 30), align 1
+  store i8 %r31, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 31), align 1
+  store i8 %r32, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
+  store i8 %r33, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 33), align 1
+  store i8 %r34, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 34), align 1
+  store i8 %r35, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 35), align 1
+  store i8 %r36, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 36), align 1
+  store i8 %r37, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 37), align 1
+  store i8 %r38, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 38), align 1
+  store i8 %r39, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 39), align 1
+  store i8 %r40, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 40), align 1
+  store i8 %r41, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 41), align 1
+  store i8 %r42, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 42), align 1
+  store i8 %r43, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 43), align 1
+  store i8 %r44, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 44), align 1
+  store i8 %r45, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 45), align 1
+  store i8 %r46, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 46), align 1
+  store i8 %r47, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 47), align 1
+  store i8 %r48, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 48), align 1
+  store i8 %r49, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 49), align 1
+  store i8 %r50, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 50), align 1
+  store i8 %r51, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 51), align 1
+  store i8 %r52, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 52), align 1
+  store i8 %r53, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 53), align 1
+  store i8 %r54, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 54), align 1
+  store i8 %r55, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 55), align 1
+  store i8 %r56, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 56), align 1
+  store i8 %r57, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 57), align 1
+  store i8 %r58, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 58), align 1
+  store i8 %r59, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 59), align 1
+  store i8 %r60, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 60), align 1
+  store i8 %r61, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 61), align 1
+  store i8 %r62, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 62), align 1
+  store i8 %r63, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 63), align 1
+  ret void
+}
+
+define void @fshr_v2i32() {
+; SSE-LABEL: @fshr_v2i32(
+; SSE-NEXT:    [[A0:%.*]] = load i32, ptr @a32, align 4
+; SSE-NEXT:    [[A1:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 1), align 4
+; SSE-NEXT:    [[B0:%.*]] = load i32, ptr @b32, align 4
+; SSE-NEXT:    [[B1:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 1), align 4
+; SSE-NEXT:    [[R0:%.*]] = call i32 @llvm.fshr.i32(i32 [[A0]], i32 [[A0]], i32 [[B0]])
+; SSE-NEXT:    [[R1:%.*]] = call i32 @llvm.fshr.i32(i32 [[A1]], i32 [[A1]], i32 [[B1]])
+; SSE-NEXT:    store i32 [[R0]], ptr @d32, align 4
+; SSE-NEXT:    store i32 [[R1]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 1), align 4
+; SSE-NEXT:    ret void
+;
+; AVX1-LABEL: @fshr_v2i32(
+; AVX1-NEXT:    [[A0:%.*]] = load i32, ptr @a32, align 4
+; AVX1-NEXT:    [[A1:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 1), align 4
+; AVX1-NEXT:    [[B0:%.*]] = load i32, ptr @b32, align 4
+; AVX1-NEXT:    [[B1:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 1), align 4
+; AVX1-NEXT:    [[R0:%.*]] = call i32 @llvm.fshr.i32(i32 [[A0]], i32 [[A0]], i32 [[B0]])
+; AVX1-NEXT:    [[R1:%.*]] = call i32 @llvm.fshr.i32(i32 [[A1]], i32 [[A1]], i32 [[B1]])
+; AVX1-NEXT:    store i32 [[R0]], ptr @d32, align 4
+; AVX1-NEXT:    store i32 [[R1]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 1), align 4
+; AVX1-NEXT:    ret void
+;
+; AVX2-LABEL: @fshr_v2i32(
+; AVX2-NEXT:    [[A0:%.*]] = load i32, ptr @a32, align 4
+; AVX2-NEXT:    [[A1:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 1), align 4
+; AVX2-NEXT:    [[B0:%.*]] = load i32, ptr @b32, align 4
+; AVX2-NEXT:    [[B1:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 1), align 4
+; AVX2-NEXT:    [[R0:%.*]] = call i32 @llvm.fshr.i32(i32 [[A0]], i32 [[A0]], i32 [[B0]])
+; AVX2-NEXT:    [[R1:%.*]] = call i32 @llvm.fshr.i32(i32 [[A1]], i32 [[A1]], i32 [[B1]])
+; AVX2-NEXT:    store i32 [[R0]], ptr @d32, align 4
+; AVX2-NEXT:    store i32 [[R1]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 1), align 4
+; AVX2-NEXT:    ret void
+;
+; AVX256-LABEL: @fshr_v2i32(
+; AVX256-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr @a32, align 4
+; AVX256-NEXT:    [[TMP2:%.*]] = load <2 x i32>, ptr @b32, align 4
+; AVX256-NEXT:    [[TMP3:%.*]] = call <2 x i32> @llvm.fshr.v2i32(<2 x i32> [[TMP1]], <2 x i32> [[TMP1]], <2 x i32> [[TMP2]])
+; AVX256-NEXT:    store <2 x i32> [[TMP3]], ptr @d32, align 4
+; AVX256-NEXT:    ret void
+;
+; AVX512-LABEL: @fshr_v2i32(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr @a32, align 4
+; AVX512-NEXT:    [[TMP2:%.*]] = load <2 x i32>, ptr @b32, align 4
+; AVX512-NEXT:    [[TMP3:%.*]] = call <2 x i32> @llvm.fshr.v2i32(<2 x i32> [[TMP1]], <2 x i32> [[TMP1]], <2 x i32> [[TMP2]])
+; AVX512-NEXT:    store <2 x i32> [[TMP3]], ptr @d32, align 4
+; AVX512-NEXT:    ret void
+;
+  %a0  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 0 ), align 4
+  %a1  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 1 ), align 4
+  %b0  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 0 ), align 4
+  %b1  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 1 ), align 4
+  %r0  = call i32 @llvm.fshr.i32(i32 %a0 , i32 %a0 , i32 %b0 )
+  %r1  = call i32 @llvm.fshr.i32(i32 %a1 , i32 %a1 , i32 %b1 )
+  store i32 %r0 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 0 ), align 4
+  store i32 %r1 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 1 ), align 4
+  ret void
+}
+
+; PR63980
+define void @fshr_v2i32_uniformconst() {
+; SSE-LABEL: @fshr_v2i32_uniformconst(
+; SSE-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr @a32, align 4
+; SSE-NEXT:    [[TMP2:%.*]] = call <2 x i32> @llvm.fshr.v2i32(<2 x i32> [[TMP1]], <2 x i32> [[TMP1]], <2 x i32> <i32 1, i32 1>)
+; SSE-NEXT:    store <2 x i32> [[TMP2]], ptr @d32, align 4
+; SSE-NEXT:    ret void
+;
+; AVX-LABEL: @fshr_v2i32_uniformconst(
+; AVX-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr @a32, align 4
+; AVX-NEXT:    [[TMP2:%.*]] = call <2 x i32> @llvm.fshr.v2i32(<2 x i32> [[TMP1]], <2 x i32> [[TMP1]], <2 x i32> <i32 1, i32 1>)
+; AVX-NEXT:    store <2 x i32> [[TMP2]], ptr @d32, align 4
+; AVX-NEXT:    ret void
+;
+; AVX512-LABEL: @fshr_v2i32_uniformconst(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr @a32, align 4
+; AVX512-NEXT:    [[TMP2:%.*]] = call <2 x i32> @llvm.fshr.v2i32(<2 x i32> [[TMP1]], <2 x i32> [[TMP1]], <2 x i32> <i32 1, i32 1>)
+; AVX512-NEXT:    store <2 x i32> [[TMP2]], ptr @d32, align 4
+; AVX512-NEXT:    ret void
+;
+  %a0  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 0 ), align 4
+  %a1  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 1 ), align 4
+  %r0  = call i32 @llvm.fshr.i32(i32 %a0 , i32 %a0 , i32 1 )
+  %r1  = call i32 @llvm.fshr.i32(i32 %a1 , i32 %a1 , i32 1 )
+  store i32 %r0 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 0 ), align 4
+  store i32 %r1 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 1 ), align 4
+  ret void
+}

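For context: fshr(x, x, c) is a rotate-right of x by c (and fshl(x, x, c) a
rotate-left), so the arith-fshr-rot.ll checks above are really rotate
vectorization tests. The CHECK lines record current per-subtarget behaviour:
the SLP vectorizer merges adjacent scalar calls into a vector funnel-shift
intrinsic at the widest width its cost model accepts (e.g. <16 x i32> on
AVX512, two <8 x i32> halves on AVX), and leaves them scalar where it does
not, as in the SSE fshr_v16i32 output. The PR63980 test covers the same fold
when the rotate amount is a uniform constant. A minimal sketch of the
expected rewrite, illustrative only (the function name is invented, not part
of the patch):

  declare <2 x i32> @llvm.fshr.v2i32(<2 x i32>, <2 x i32>, <2 x i32>)

  ; Two adjacent scalar rotr(x, c) calls merged into one <2 x i32> call;
  ; passing the same value as both data operands turns fshr into a rotate.
  define void @rotr_pair_sketch(ptr %src, ptr %amt, ptr %dst) {
    %x = load <2 x i32>, ptr %src, align 4
    %c = load <2 x i32>, ptr %amt, align 4
    %r = call <2 x i32> @llvm.fshr.v2i32(<2 x i32> %x, <2 x i32> %x, <2 x i32> %c)
    store <2 x i32> %r, ptr %dst, align 4
    ret void
  }
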
diff  --git a/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr.ll b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr.ll
new file mode 100644
index 00000000000000..b456742337abd8
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/arith-fshr.ll
@@ -0,0 +1,938 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mtriple=x86_64-unknown -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=SSE
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=slm -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=SSE
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -mattr=+prefer-256-bit -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX,AVX256
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -mattr=-prefer-256-bit -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX512
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=knl -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX512
+
+ at a64 = common global [8 x i64] zeroinitializer, align 64
+ at b64 = common global [8 x i64] zeroinitializer, align 64
+ at c64 = common global [8 x i64] zeroinitializer, align 64
+ at d64 = common global [8 x i64] zeroinitializer, align 64
+ at a32 = common global [16 x i32] zeroinitializer, align 64
+ at b32 = common global [16 x i32] zeroinitializer, align 64
+ at c32 = common global [16 x i32] zeroinitializer, align 64
+ at d32 = common global [16 x i32] zeroinitializer, align 64
+ at a16 = common global [32 x i16] zeroinitializer, align 64
+ at b16 = common global [32 x i16] zeroinitializer, align 64
+ at c16 = common global [32 x i16] zeroinitializer, align 64
+ at d16 = common global [32 x i16] zeroinitializer, align 64
+ at a8  = common global [64 x i8] zeroinitializer, align 64
+ at b8  = common global [64 x i8] zeroinitializer, align 64
+ at c8  = common global [64 x i8] zeroinitializer, align 64
+ at d8  = common global [64 x i8] zeroinitializer, align 64
+
+declare i64 @llvm.fshr.i64(i64, i64, i64)
+declare i32 @llvm.fshr.i32(i32, i32, i32)
+declare i16 @llvm.fshr.i16(i16, i16, i16)
+declare i8  @llvm.fshr.i8 (i8 , i8 , i8 )
+
+define void @fshr_v8i64() {
+; SSE-LABEL: @fshr_v8i64(
+; SSE-NEXT:    [[A0:%.*]] = load i64, ptr @a64, align 8
+; SSE-NEXT:    [[A1:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 1), align 8
+; SSE-NEXT:    [[A2:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 2), align 8
+; SSE-NEXT:    [[A3:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 3), align 8
+; SSE-NEXT:    [[A4:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 4), align 8
+; SSE-NEXT:    [[A5:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 5), align 8
+; SSE-NEXT:    [[A6:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 6), align 8
+; SSE-NEXT:    [[A7:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 7), align 8
+; SSE-NEXT:    [[B0:%.*]] = load i64, ptr @b64, align 8
+; SSE-NEXT:    [[B1:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 1), align 8
+; SSE-NEXT:    [[B2:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 2), align 8
+; SSE-NEXT:    [[B3:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 3), align 8
+; SSE-NEXT:    [[B4:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 4), align 8
+; SSE-NEXT:    [[B5:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 5), align 8
+; SSE-NEXT:    [[B6:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 6), align 8
+; SSE-NEXT:    [[B7:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 7), align 8
+; SSE-NEXT:    [[C0:%.*]] = load i64, ptr @c64, align 8
+; SSE-NEXT:    [[C1:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 1), align 8
+; SSE-NEXT:    [[C2:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 2), align 8
+; SSE-NEXT:    [[C3:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 3), align 8
+; SSE-NEXT:    [[C4:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 4), align 8
+; SSE-NEXT:    [[C5:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 5), align 8
+; SSE-NEXT:    [[C6:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 6), align 8
+; SSE-NEXT:    [[C7:%.*]] = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 7), align 8
+; SSE-NEXT:    [[R0:%.*]] = call i64 @llvm.fshr.i64(i64 [[A0]], i64 [[B0]], i64 [[C0]])
+; SSE-NEXT:    [[R1:%.*]] = call i64 @llvm.fshr.i64(i64 [[A1]], i64 [[B1]], i64 [[C1]])
+; SSE-NEXT:    [[R2:%.*]] = call i64 @llvm.fshr.i64(i64 [[A2]], i64 [[B2]], i64 [[C2]])
+; SSE-NEXT:    [[R3:%.*]] = call i64 @llvm.fshr.i64(i64 [[A3]], i64 [[B3]], i64 [[C3]])
+; SSE-NEXT:    [[R4:%.*]] = call i64 @llvm.fshr.i64(i64 [[A4]], i64 [[B4]], i64 [[C4]])
+; SSE-NEXT:    [[R5:%.*]] = call i64 @llvm.fshr.i64(i64 [[A5]], i64 [[B5]], i64 [[C5]])
+; SSE-NEXT:    [[R6:%.*]] = call i64 @llvm.fshr.i64(i64 [[A6]], i64 [[B6]], i64 [[C6]])
+; SSE-NEXT:    [[R7:%.*]] = call i64 @llvm.fshr.i64(i64 [[A7]], i64 [[B7]], i64 [[C7]])
+; SSE-NEXT:    store i64 [[R0]], ptr @d64, align 8
+; SSE-NEXT:    store i64 [[R1]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 1), align 8
+; SSE-NEXT:    store i64 [[R2]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 2), align 8
+; SSE-NEXT:    store i64 [[R3]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 3), align 8
+; SSE-NEXT:    store i64 [[R4]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 4), align 8
+; SSE-NEXT:    store i64 [[R5]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 5), align 8
+; SSE-NEXT:    store i64 [[R6]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 6), align 8
+; SSE-NEXT:    store i64 [[R7]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 7), align 8
+; SSE-NEXT:    ret void
+;
+; AVX1-LABEL: @fshr_v8i64(
+; AVX1-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @a64, align 8
+; AVX1-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr @b64, align 8
+; AVX1-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr @c64, align 8
+; AVX1-NEXT:    [[TMP4:%.*]] = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> [[TMP1]], <2 x i64> [[TMP2]], <2 x i64> [[TMP3]])
+; AVX1-NEXT:    store <2 x i64> [[TMP4]], ptr @d64, align 8
+; AVX1-NEXT:    [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 2), align 8
+; AVX1-NEXT:    [[TMP6:%.*]] = load <2 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 2), align 8
+; AVX1-NEXT:    [[TMP7:%.*]] = load <2 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 2), align 8
+; AVX1-NEXT:    [[TMP8:%.*]] = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> [[TMP5]], <2 x i64> [[TMP6]], <2 x i64> [[TMP7]])
+; AVX1-NEXT:    store <2 x i64> [[TMP8]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 2), align 8
+; AVX1-NEXT:    [[TMP9:%.*]] = load <2 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 4), align 8
+; AVX1-NEXT:    [[TMP10:%.*]] = load <2 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 4), align 8
+; AVX1-NEXT:    [[TMP11:%.*]] = load <2 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 4), align 8
+; AVX1-NEXT:    [[TMP12:%.*]] = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> [[TMP9]], <2 x i64> [[TMP10]], <2 x i64> [[TMP11]])
+; AVX1-NEXT:    store <2 x i64> [[TMP12]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 4), align 8
+; AVX1-NEXT:    [[TMP13:%.*]] = load <2 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 6), align 8
+; AVX1-NEXT:    [[TMP14:%.*]] = load <2 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 6), align 8
+; AVX1-NEXT:    [[TMP15:%.*]] = load <2 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 6), align 8
+; AVX1-NEXT:    [[TMP16:%.*]] = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> [[TMP13]], <2 x i64> [[TMP14]], <2 x i64> [[TMP15]])
+; AVX1-NEXT:    store <2 x i64> [[TMP16]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 6), align 8
+; AVX1-NEXT:    ret void
+;
+; AVX2-LABEL: @fshr_v8i64(
+; AVX2-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @a64, align 8
+; AVX2-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr @b64, align 8
+; AVX2-NEXT:    [[TMP3:%.*]] = load <4 x i64>, ptr @c64, align 8
+; AVX2-NEXT:    [[TMP4:%.*]] = call <4 x i64> @llvm.fshr.v4i64(<4 x i64> [[TMP1]], <4 x i64> [[TMP2]], <4 x i64> [[TMP3]])
+; AVX2-NEXT:    store <4 x i64> [[TMP4]], ptr @d64, align 8
+; AVX2-NEXT:    [[TMP5:%.*]] = load <4 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 4), align 8
+; AVX2-NEXT:    [[TMP6:%.*]] = load <4 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 4), align 8
+; AVX2-NEXT:    [[TMP7:%.*]] = load <4 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 4), align 8
+; AVX2-NEXT:    [[TMP8:%.*]] = call <4 x i64> @llvm.fshr.v4i64(<4 x i64> [[TMP5]], <4 x i64> [[TMP6]], <4 x i64> [[TMP7]])
+; AVX2-NEXT:    store <4 x i64> [[TMP8]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 4), align 8
+; AVX2-NEXT:    ret void
+;
+; AVX256-LABEL: @fshr_v8i64(
+; AVX256-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @a64, align 8
+; AVX256-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr @b64, align 8
+; AVX256-NEXT:    [[TMP3:%.*]] = load <4 x i64>, ptr @c64, align 8
+; AVX256-NEXT:    [[TMP4:%.*]] = call <4 x i64> @llvm.fshr.v4i64(<4 x i64> [[TMP1]], <4 x i64> [[TMP2]], <4 x i64> [[TMP3]])
+; AVX256-NEXT:    store <4 x i64> [[TMP4]], ptr @d64, align 8
+; AVX256-NEXT:    [[TMP5:%.*]] = load <4 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 4), align 8
+; AVX256-NEXT:    [[TMP6:%.*]] = load <4 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 4), align 8
+; AVX256-NEXT:    [[TMP7:%.*]] = load <4 x i64>, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 4), align 8
+; AVX256-NEXT:    [[TMP8:%.*]] = call <4 x i64> @llvm.fshr.v4i64(<4 x i64> [[TMP5]], <4 x i64> [[TMP6]], <4 x i64> [[TMP7]])
+; AVX256-NEXT:    store <4 x i64> [[TMP8]], ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 4), align 8
+; AVX256-NEXT:    ret void
+;
+; AVX512-LABEL: @fshr_v8i64(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <8 x i64>, ptr @a64, align 8
+; AVX512-NEXT:    [[TMP2:%.*]] = load <8 x i64>, ptr @b64, align 8
+; AVX512-NEXT:    [[TMP3:%.*]] = load <8 x i64>, ptr @c64, align 8
+; AVX512-NEXT:    [[TMP4:%.*]] = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i64> [[TMP3]])
+; AVX512-NEXT:    store <8 x i64> [[TMP4]], ptr @d64, align 8
+; AVX512-NEXT:    ret void
+;
+  %a0 = load i64, ptr @a64, align 8
+  %a1 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 1), align 8
+  %a2 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 2), align 8
+  %a3 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 3), align 8
+  %a4 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 4), align 8
+  %a5 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 5), align 8
+  %a6 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 6), align 8
+  %a7 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @a64, i32 0, i64 7), align 8
+  %b0 = load i64, ptr @b64, align 8
+  %b1 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 1), align 8
+  %b2 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 2), align 8
+  %b3 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 3), align 8
+  %b4 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 4), align 8
+  %b5 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 5), align 8
+  %b6 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 6), align 8
+  %b7 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @b64, i32 0, i64 7), align 8
+  %c0 = load i64, ptr @c64, align 8
+  %c1 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 1), align 8
+  %c2 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 2), align 8
+  %c3 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 3), align 8
+  %c4 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 4), align 8
+  %c5 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 5), align 8
+  %c6 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 6), align 8
+  %c7 = load i64, ptr getelementptr inbounds ([8 x i64], ptr @c64, i32 0, i64 7), align 8
+  %r0 = call i64 @llvm.fshr.i64(i64 %a0, i64 %b0, i64 %c0)
+  %r1 = call i64 @llvm.fshr.i64(i64 %a1, i64 %b1, i64 %c1)
+  %r2 = call i64 @llvm.fshr.i64(i64 %a2, i64 %b2, i64 %c2)
+  %r3 = call i64 @llvm.fshr.i64(i64 %a3, i64 %b3, i64 %c3)
+  %r4 = call i64 @llvm.fshr.i64(i64 %a4, i64 %b4, i64 %c4)
+  %r5 = call i64 @llvm.fshr.i64(i64 %a5, i64 %b5, i64 %c5)
+  %r6 = call i64 @llvm.fshr.i64(i64 %a6, i64 %b6, i64 %c6)
+  %r7 = call i64 @llvm.fshr.i64(i64 %a7, i64 %b7, i64 %c7)
+  store i64 %r0, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 0), align 8
+  store i64 %r1, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 1), align 8
+  store i64 %r2, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 2), align 8
+  store i64 %r3, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 3), align 8
+  store i64 %r4, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 4), align 8
+  store i64 %r5, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 5), align 8
+  store i64 %r6, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 6), align 8
+  store i64 %r7, ptr getelementptr inbounds ([8 x i64], ptr @d64, i32 0, i64 7), align 8
+  ret void
+}
+
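+; Reference (LLVM LangRef semantics, noted here for readers of the checks):
+; @llvm.fshr.iN(%a, %b, %c) concatenates %a (high bits) with %b (low bits),
+; logically shifts the 2N-bit value right by %c modulo N, and returns the low
+; N bits, so e.g. @llvm.fshr.i8(i8 0xAB, i8 0xCD, i8 4) yields 0xBC, and
+; fshr(%x, %x, %c) is a rotate-right.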
+define void @fshr_v16i32() {
+; SSE-LABEL: @fshr_v16i32(
+; SSE-NEXT:    [[A0:%.*]] = load i32, ptr @a32, align 4
+; SSE-NEXT:    [[A1:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 1), align 4
+; SSE-NEXT:    [[A2:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 2), align 4
+; SSE-NEXT:    [[A3:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 3), align 4
+; SSE-NEXT:    [[A4:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 4), align 4
+; SSE-NEXT:    [[A5:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 5), align 4
+; SSE-NEXT:    [[A6:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 6), align 4
+; SSE-NEXT:    [[A7:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 7), align 4
+; SSE-NEXT:    [[A8:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 8), align 4
+; SSE-NEXT:    [[A9:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 9), align 4
+; SSE-NEXT:    [[A10:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 10), align 4
+; SSE-NEXT:    [[A11:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 11), align 4
+; SSE-NEXT:    [[A12:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 12), align 4
+; SSE-NEXT:    [[A13:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 13), align 4
+; SSE-NEXT:    [[A14:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 14), align 4
+; SSE-NEXT:    [[A15:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 15), align 4
+; SSE-NEXT:    [[B0:%.*]] = load i32, ptr @b32, align 4
+; SSE-NEXT:    [[B1:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 1), align 4
+; SSE-NEXT:    [[B2:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 2), align 4
+; SSE-NEXT:    [[B3:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 3), align 4
+; SSE-NEXT:    [[B4:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 4), align 4
+; SSE-NEXT:    [[B5:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 5), align 4
+; SSE-NEXT:    [[B6:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 6), align 4
+; SSE-NEXT:    [[B7:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 7), align 4
+; SSE-NEXT:    [[B8:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 8), align 4
+; SSE-NEXT:    [[B9:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 9), align 4
+; SSE-NEXT:    [[B10:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 10), align 4
+; SSE-NEXT:    [[B11:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 11), align 4
+; SSE-NEXT:    [[B12:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 12), align 4
+; SSE-NEXT:    [[B13:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 13), align 4
+; SSE-NEXT:    [[B14:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 14), align 4
+; SSE-NEXT:    [[B15:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 15), align 4
+; SSE-NEXT:    [[C0:%.*]] = load i32, ptr @c32, align 4
+; SSE-NEXT:    [[C1:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 1), align 4
+; SSE-NEXT:    [[C2:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 2), align 4
+; SSE-NEXT:    [[C3:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 3), align 4
+; SSE-NEXT:    [[C4:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 4), align 4
+; SSE-NEXT:    [[C5:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 5), align 4
+; SSE-NEXT:    [[C6:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 6), align 4
+; SSE-NEXT:    [[C7:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 7), align 4
+; SSE-NEXT:    [[C8:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 8), align 4
+; SSE-NEXT:    [[C9:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 9), align 4
+; SSE-NEXT:    [[C10:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 10), align 4
+; SSE-NEXT:    [[C11:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 11), align 4
+; SSE-NEXT:    [[C12:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 12), align 4
+; SSE-NEXT:    [[C13:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 13), align 4
+; SSE-NEXT:    [[C14:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 14), align 4
+; SSE-NEXT:    [[C15:%.*]] = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 15), align 4
+; SSE-NEXT:    [[R0:%.*]] = call i32 @llvm.fshr.i32(i32 [[A0]], i32 [[B0]], i32 [[C0]])
+; SSE-NEXT:    [[R1:%.*]] = call i32 @llvm.fshr.i32(i32 [[A1]], i32 [[B1]], i32 [[C1]])
+; SSE-NEXT:    [[R2:%.*]] = call i32 @llvm.fshr.i32(i32 [[A2]], i32 [[B2]], i32 [[C2]])
+; SSE-NEXT:    [[R3:%.*]] = call i32 @llvm.fshr.i32(i32 [[A3]], i32 [[B3]], i32 [[C3]])
+; SSE-NEXT:    [[R4:%.*]] = call i32 @llvm.fshr.i32(i32 [[A4]], i32 [[B4]], i32 [[C4]])
+; SSE-NEXT:    [[R5:%.*]] = call i32 @llvm.fshr.i32(i32 [[A5]], i32 [[B5]], i32 [[C5]])
+; SSE-NEXT:    [[R6:%.*]] = call i32 @llvm.fshr.i32(i32 [[A6]], i32 [[B6]], i32 [[C6]])
+; SSE-NEXT:    [[R7:%.*]] = call i32 @llvm.fshr.i32(i32 [[A7]], i32 [[B7]], i32 [[C7]])
+; SSE-NEXT:    [[R8:%.*]] = call i32 @llvm.fshr.i32(i32 [[A8]], i32 [[B8]], i32 [[C8]])
+; SSE-NEXT:    [[R9:%.*]] = call i32 @llvm.fshr.i32(i32 [[A9]], i32 [[B9]], i32 [[C9]])
+; SSE-NEXT:    [[R10:%.*]] = call i32 @llvm.fshr.i32(i32 [[A10]], i32 [[B10]], i32 [[C10]])
+; SSE-NEXT:    [[R11:%.*]] = call i32 @llvm.fshr.i32(i32 [[A11]], i32 [[B11]], i32 [[C11]])
+; SSE-NEXT:    [[R12:%.*]] = call i32 @llvm.fshr.i32(i32 [[A12]], i32 [[B12]], i32 [[C12]])
+; SSE-NEXT:    [[R13:%.*]] = call i32 @llvm.fshr.i32(i32 [[A13]], i32 [[B13]], i32 [[C13]])
+; SSE-NEXT:    [[R14:%.*]] = call i32 @llvm.fshr.i32(i32 [[A14]], i32 [[B14]], i32 [[C14]])
+; SSE-NEXT:    [[R15:%.*]] = call i32 @llvm.fshr.i32(i32 [[A15]], i32 [[B15]], i32 [[C15]])
+; SSE-NEXT:    store i32 [[R0]], ptr @d32, align 4
+; SSE-NEXT:    store i32 [[R1]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 1), align 4
+; SSE-NEXT:    store i32 [[R2]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 2), align 4
+; SSE-NEXT:    store i32 [[R3]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 3), align 4
+; SSE-NEXT:    store i32 [[R4]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 4), align 4
+; SSE-NEXT:    store i32 [[R5]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 5), align 4
+; SSE-NEXT:    store i32 [[R6]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 6), align 4
+; SSE-NEXT:    store i32 [[R7]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 7), align 4
+; SSE-NEXT:    store i32 [[R8]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 8), align 4
+; SSE-NEXT:    store i32 [[R9]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 9), align 4
+; SSE-NEXT:    store i32 [[R10]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 10), align 4
+; SSE-NEXT:    store i32 [[R11]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 11), align 4
+; SSE-NEXT:    store i32 [[R12]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 12), align 4
+; SSE-NEXT:    store i32 [[R13]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 13), align 4
+; SSE-NEXT:    store i32 [[R14]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 14), align 4
+; SSE-NEXT:    store i32 [[R15]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 15), align 4
+; SSE-NEXT:    ret void
+;
+; AVX-LABEL: @fshr_v16i32(
+; AVX-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @a32, align 4
+; AVX-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr @b32, align 4
+; AVX-NEXT:    [[TMP3:%.*]] = load <8 x i32>, ptr @c32, align 4
+; AVX-NEXT:    [[TMP4:%.*]] = call <8 x i32> @llvm.fshr.v8i32(<8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> [[TMP3]])
+; AVX-NEXT:    store <8 x i32> [[TMP4]], ptr @d32, align 4
+; AVX-NEXT:    [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 8), align 4
+; AVX-NEXT:    [[TMP6:%.*]] = load <8 x i32>, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 8), align 4
+; AVX-NEXT:    [[TMP7:%.*]] = load <8 x i32>, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 8), align 4
+; AVX-NEXT:    [[TMP8:%.*]] = call <8 x i32> @llvm.fshr.v8i32(<8 x i32> [[TMP5]], <8 x i32> [[TMP6]], <8 x i32> [[TMP7]])
+; AVX-NEXT:    store <8 x i32> [[TMP8]], ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 8), align 4
+; AVX-NEXT:    ret void
+;
+; AVX512-LABEL: @fshr_v16i32(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <16 x i32>, ptr @a32, align 4
+; AVX512-NEXT:    [[TMP2:%.*]] = load <16 x i32>, ptr @b32, align 4
+; AVX512-NEXT:    [[TMP3:%.*]] = load <16 x i32>, ptr @c32, align 4
+; AVX512-NEXT:    [[TMP4:%.*]] = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> [[TMP3]])
+; AVX512-NEXT:    store <16 x i32> [[TMP4]], ptr @d32, align 4
+; AVX512-NEXT:    ret void
+;
+  %a0  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 0 ), align 4
+  %a1  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 1 ), align 4
+  %a2  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 2 ), align 4
+  %a3  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 3 ), align 4
+  %a4  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 4 ), align 4
+  %a5  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 5 ), align 4
+  %a6  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 6 ), align 4
+  %a7  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 7 ), align 4
+  %a8  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 8 ), align 4
+  %a9  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 9 ), align 4
+  %a10 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 10), align 4
+  %a11 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 11), align 4
+  %a12 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 12), align 4
+  %a13 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 13), align 4
+  %a14 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 14), align 4
+  %a15 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @a32, i32 0, i64 15), align 4
+  %b0  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 0 ), align 4
+  %b1  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 1 ), align 4
+  %b2  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 2 ), align 4
+  %b3  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 3 ), align 4
+  %b4  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 4 ), align 4
+  %b5  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 5 ), align 4
+  %b6  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 6 ), align 4
+  %b7  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 7 ), align 4
+  %b8  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 8 ), align 4
+  %b9  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 9 ), align 4
+  %b10 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 10), align 4
+  %b11 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 11), align 4
+  %b12 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 12), align 4
+  %b13 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 13), align 4
+  %b14 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 14), align 4
+  %b15 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @b32, i32 0, i64 15), align 4
+  %c0  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 0 ), align 4
+  %c1  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 1 ), align 4
+  %c2  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 2 ), align 4
+  %c3  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 3 ), align 4
+  %c4  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 4 ), align 4
+  %c5  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 5 ), align 4
+  %c6  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 6 ), align 4
+  %c7  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 7 ), align 4
+  %c8  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 8 ), align 4
+  %c9  = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 9 ), align 4
+  %c10 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 10), align 4
+  %c11 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 11), align 4
+  %c12 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 12), align 4
+  %c13 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 13), align 4
+  %c14 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 14), align 4
+  %c15 = load i32, ptr getelementptr inbounds ([16 x i32], ptr @c32, i32 0, i64 15), align 4
+  %r0  = call i32 @llvm.fshr.i32(i32 %a0 , i32 %b0 , i32 %c0 )
+  %r1  = call i32 @llvm.fshr.i32(i32 %a1 , i32 %b1 , i32 %c1 )
+  %r2  = call i32 @llvm.fshr.i32(i32 %a2 , i32 %b2 , i32 %c2 )
+  %r3  = call i32 @llvm.fshr.i32(i32 %a3 , i32 %b3 , i32 %c3 )
+  %r4  = call i32 @llvm.fshr.i32(i32 %a4 , i32 %b4 , i32 %c4 )
+  %r5  = call i32 @llvm.fshr.i32(i32 %a5 , i32 %b5 , i32 %c5 )
+  %r6  = call i32 @llvm.fshr.i32(i32 %a6 , i32 %b6 , i32 %c6 )
+  %r7  = call i32 @llvm.fshr.i32(i32 %a7 , i32 %b7 , i32 %c7 )
+  %r8  = call i32 @llvm.fshr.i32(i32 %a8 , i32 %b8 , i32 %c8 )
+  %r9  = call i32 @llvm.fshr.i32(i32 %a9 , i32 %b9 , i32 %c9 )
+  %r10 = call i32 @llvm.fshr.i32(i32 %a10, i32 %b10, i32 %c10)
+  %r11 = call i32 @llvm.fshr.i32(i32 %a11, i32 %b11, i32 %c11)
+  %r12 = call i32 @llvm.fshr.i32(i32 %a12, i32 %b12, i32 %c12)
+  %r13 = call i32 @llvm.fshr.i32(i32 %a13, i32 %b13, i32 %c13)
+  %r14 = call i32 @llvm.fshr.i32(i32 %a14, i32 %b14, i32 %c14)
+  %r15 = call i32 @llvm.fshr.i32(i32 %a15, i32 %b15, i32 %c15)
+  store i32 %r0 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 0 ), align 4
+  store i32 %r1 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 1 ), align 4
+  store i32 %r2 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 2 ), align 4
+  store i32 %r3 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 3 ), align 4
+  store i32 %r4 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 4 ), align 4
+  store i32 %r5 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 5 ), align 4
+  store i32 %r6 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 6 ), align 4
+  store i32 %r7 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 7 ), align 4
+  store i32 %r8 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 8 ), align 4
+  store i32 %r9 , ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 9 ), align 4
+  store i32 %r10, ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 10), align 4
+  store i32 %r11, ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 11), align 4
+  store i32 %r12, ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 12), align 4
+  store i32 %r13, ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 13), align 4
+  store i32 %r14, ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 14), align 4
+  store i32 %r15, ptr getelementptr inbounds ([16 x i32], ptr @d32, i32 0, i64 15), align 4
+  ret void
+}
+
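+; Note: under the SSE run lines the i64 and i32 fshr calls above remain
+; scalar, while AVX splits @fshr_v16i32 into two <8 x i32> halves and AVX512
+; emits a single <16 x i32> call.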
+define void @fshr_v32i16() {
+; SSE-LABEL: @fshr_v32i16(
+; SSE-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @a16, align 2
+; SSE-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr @b16, align 2
+; SSE-NEXT:    [[TMP3:%.*]] = load <8 x i16>, ptr @c16, align 2
+; SSE-NEXT:    [[TMP4:%.*]] = call <8 x i16> @llvm.fshr.v8i16(<8 x i16> [[TMP1]], <8 x i16> [[TMP2]], <8 x i16> [[TMP3]])
+; SSE-NEXT:    store <8 x i16> [[TMP4]], ptr @d16, align 2
+; SSE-NEXT:    [[TMP5:%.*]] = load <8 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 8), align 2
+; SSE-NEXT:    [[TMP6:%.*]] = load <8 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 8), align 2
+; SSE-NEXT:    [[TMP7:%.*]] = load <8 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 8), align 2
+; SSE-NEXT:    [[TMP8:%.*]] = call <8 x i16> @llvm.fshr.v8i16(<8 x i16> [[TMP5]], <8 x i16> [[TMP6]], <8 x i16> [[TMP7]])
+; SSE-NEXT:    store <8 x i16> [[TMP8]], ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 8), align 2
+; SSE-NEXT:    [[TMP9:%.*]] = load <8 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 16), align 2
+; SSE-NEXT:    [[TMP10:%.*]] = load <8 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 16), align 2
+; SSE-NEXT:    [[TMP11:%.*]] = load <8 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 16), align 2
+; SSE-NEXT:    [[TMP12:%.*]] = call <8 x i16> @llvm.fshr.v8i16(<8 x i16> [[TMP9]], <8 x i16> [[TMP10]], <8 x i16> [[TMP11]])
+; SSE-NEXT:    store <8 x i16> [[TMP12]], ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 16), align 2
+; SSE-NEXT:    [[TMP13:%.*]] = load <8 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 24), align 2
+; SSE-NEXT:    [[TMP14:%.*]] = load <8 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 24), align 2
+; SSE-NEXT:    [[TMP15:%.*]] = load <8 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 24), align 2
+; SSE-NEXT:    [[TMP16:%.*]] = call <8 x i16> @llvm.fshr.v8i16(<8 x i16> [[TMP13]], <8 x i16> [[TMP14]], <8 x i16> [[TMP15]])
+; SSE-NEXT:    store <8 x i16> [[TMP16]], ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 24), align 2
+; SSE-NEXT:    ret void
+;
+; AVX-LABEL: @fshr_v32i16(
+; AVX-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr @a16, align 2
+; AVX-NEXT:    [[TMP2:%.*]] = load <16 x i16>, ptr @b16, align 2
+; AVX-NEXT:    [[TMP3:%.*]] = load <16 x i16>, ptr @c16, align 2
+; AVX-NEXT:    [[TMP4:%.*]] = call <16 x i16> @llvm.fshr.v16i16(<16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i16> [[TMP3]])
+; AVX-NEXT:    store <16 x i16> [[TMP4]], ptr @d16, align 2
+; AVX-NEXT:    [[TMP5:%.*]] = load <16 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 16), align 2
+; AVX-NEXT:    [[TMP6:%.*]] = load <16 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 16), align 2
+; AVX-NEXT:    [[TMP7:%.*]] = load <16 x i16>, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 16), align 2
+; AVX-NEXT:    [[TMP8:%.*]] = call <16 x i16> @llvm.fshr.v16i16(<16 x i16> [[TMP5]], <16 x i16> [[TMP6]], <16 x i16> [[TMP7]])
+; AVX-NEXT:    store <16 x i16> [[TMP8]], ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 16), align 2
+; AVX-NEXT:    ret void
+;
+; AVX512-LABEL: @fshr_v32i16(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <32 x i16>, ptr @a16, align 2
+; AVX512-NEXT:    [[TMP2:%.*]] = load <32 x i16>, ptr @b16, align 2
+; AVX512-NEXT:    [[TMP3:%.*]] = load <32 x i16>, ptr @c16, align 2
+; AVX512-NEXT:    [[TMP4:%.*]] = call <32 x i16> @llvm.fshr.v32i16(<32 x i16> [[TMP1]], <32 x i16> [[TMP2]], <32 x i16> [[TMP3]])
+; AVX512-NEXT:    store <32 x i16> [[TMP4]], ptr @d16, align 2
+; AVX512-NEXT:    ret void
+;
+  %a0  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 0 ), align 2
+  %a1  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 1 ), align 2
+  %a2  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 2 ), align 2
+  %a3  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 3 ), align 2
+  %a4  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 4 ), align 2
+  %a5  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 5 ), align 2
+  %a6  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 6 ), align 2
+  %a7  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 7 ), align 2
+  %a8  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 8 ), align 2
+  %a9  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 9 ), align 2
+  %a10 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 10), align 2
+  %a11 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 11), align 2
+  %a12 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 12), align 2
+  %a13 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 13), align 2
+  %a14 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 14), align 2
+  %a15 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 15), align 2
+  %a16 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 16), align 2
+  %a17 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 17), align 2
+  %a18 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 18), align 2
+  %a19 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 19), align 2
+  %a20 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 20), align 2
+  %a21 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 21), align 2
+  %a22 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 22), align 2
+  %a23 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 23), align 2
+  %a24 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 24), align 2
+  %a25 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 25), align 2
+  %a26 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 26), align 2
+  %a27 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 27), align 2
+  %a28 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 28), align 2
+  %a29 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 29), align 2
+  %a30 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 30), align 2
+  %a31 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @a16, i32 0, i64 31), align 2
+  %b0  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 0 ), align 2
+  %b1  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 1 ), align 2
+  %b2  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 2 ), align 2
+  %b3  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 3 ), align 2
+  %b4  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 4 ), align 2
+  %b5  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 5 ), align 2
+  %b6  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 6 ), align 2
+  %b7  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 7 ), align 2
+  %b8  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 8 ), align 2
+  %b9  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 9 ), align 2
+  %b10 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 10), align 2
+  %b11 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 11), align 2
+  %b12 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 12), align 2
+  %b13 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 13), align 2
+  %b14 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 14), align 2
+  %b15 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 15), align 2
+  %b16 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 16), align 2
+  %b17 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 17), align 2
+  %b18 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 18), align 2
+  %b19 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 19), align 2
+  %b20 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 20), align 2
+  %b21 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 21), align 2
+  %b22 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 22), align 2
+  %b23 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 23), align 2
+  %b24 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 24), align 2
+  %b25 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 25), align 2
+  %b26 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 26), align 2
+  %b27 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 27), align 2
+  %b28 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 28), align 2
+  %b29 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 29), align 2
+  %b30 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 30), align 2
+  %b31 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @b16, i32 0, i64 31), align 2
+  %c0  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 0 ), align 2
+  %c1  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 1 ), align 2
+  %c2  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 2 ), align 2
+  %c3  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 3 ), align 2
+  %c4  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 4 ), align 2
+  %c5  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 5 ), align 2
+  %c6  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 6 ), align 2
+  %c7  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 7 ), align 2
+  %c8  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 8 ), align 2
+  %c9  = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 9 ), align 2
+  %c10 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 10), align 2
+  %c11 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 11), align 2
+  %c12 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 12), align 2
+  %c13 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 13), align 2
+  %c14 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 14), align 2
+  %c15 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 15), align 2
+  %c16 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 16), align 2
+  %c17 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 17), align 2
+  %c18 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 18), align 2
+  %c19 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 19), align 2
+  %c20 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 20), align 2
+  %c21 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 21), align 2
+  %c22 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 22), align 2
+  %c23 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 23), align 2
+  %c24 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 24), align 2
+  %c25 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 25), align 2
+  %c26 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 26), align 2
+  %c27 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 27), align 2
+  %c28 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 28), align 2
+  %c29 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 29), align 2
+  %c30 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 30), align 2
+  %c31 = load i16, ptr getelementptr inbounds ([32 x i16], ptr @c16, i32 0, i64 31), align 2
+  %r0  = call i16 @llvm.fshr.i16(i16 %a0 , i16 %b0 , i16 %c0 )
+  %r1  = call i16 @llvm.fshr.i16(i16 %a1 , i16 %b1 , i16 %c1 )
+  %r2  = call i16 @llvm.fshr.i16(i16 %a2 , i16 %b2 , i16 %c2 )
+  %r3  = call i16 @llvm.fshr.i16(i16 %a3 , i16 %b3 , i16 %c3 )
+  %r4  = call i16 @llvm.fshr.i16(i16 %a4 , i16 %b4 , i16 %c4 )
+  %r5  = call i16 @llvm.fshr.i16(i16 %a5 , i16 %b5 , i16 %c5 )
+  %r6  = call i16 @llvm.fshr.i16(i16 %a6 , i16 %b6 , i16 %c6 )
+  %r7  = call i16 @llvm.fshr.i16(i16 %a7 , i16 %b7 , i16 %c7 )
+  %r8  = call i16 @llvm.fshr.i16(i16 %a8 , i16 %b8 , i16 %c8 )
+  %r9  = call i16 @llvm.fshr.i16(i16 %a9 , i16 %b9 , i16 %c9 )
+  %r10 = call i16 @llvm.fshr.i16(i16 %a10, i16 %b10, i16 %c10)
+  %r11 = call i16 @llvm.fshr.i16(i16 %a11, i16 %b11, i16 %c11)
+  %r12 = call i16 @llvm.fshr.i16(i16 %a12, i16 %b12, i16 %c12)
+  %r13 = call i16 @llvm.fshr.i16(i16 %a13, i16 %b13, i16 %c13)
+  %r14 = call i16 @llvm.fshr.i16(i16 %a14, i16 %b14, i16 %c14)
+  %r15 = call i16 @llvm.fshr.i16(i16 %a15, i16 %b15, i16 %c15)
+  %r16 = call i16 @llvm.fshr.i16(i16 %a16, i16 %b16, i16 %c16)
+  %r17 = call i16 @llvm.fshr.i16(i16 %a17, i16 %b17, i16 %c17)
+  %r18 = call i16 @llvm.fshr.i16(i16 %a18, i16 %b18, i16 %c18)
+  %r19 = call i16 @llvm.fshr.i16(i16 %a19, i16 %b19, i16 %c19)
+  %r20 = call i16 @llvm.fshr.i16(i16 %a20, i16 %b20, i16 %c20)
+  %r21 = call i16 @llvm.fshr.i16(i16 %a21, i16 %b21, i16 %c21)
+  %r22 = call i16 @llvm.fshr.i16(i16 %a22, i16 %b22, i16 %c22)
+  %r23 = call i16 @llvm.fshr.i16(i16 %a23, i16 %b23, i16 %c23)
+  %r24 = call i16 @llvm.fshr.i16(i16 %a24, i16 %b24, i16 %c24)
+  %r25 = call i16 @llvm.fshr.i16(i16 %a25, i16 %b25, i16 %c25)
+  %r26 = call i16 @llvm.fshr.i16(i16 %a26, i16 %b26, i16 %c26)
+  %r27 = call i16 @llvm.fshr.i16(i16 %a27, i16 %b27, i16 %c27)
+  %r28 = call i16 @llvm.fshr.i16(i16 %a28, i16 %b28, i16 %c28)
+  %r29 = call i16 @llvm.fshr.i16(i16 %a29, i16 %b29, i16 %c29)
+  %r30 = call i16 @llvm.fshr.i16(i16 %a30, i16 %b30, i16 %c30)
+  %r31 = call i16 @llvm.fshr.i16(i16 %a31, i16 %b31, i16 %c31)
+  store i16 %r0 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 0 ), align 2
+  store i16 %r1 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 1 ), align 2
+  store i16 %r2 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 2 ), align 2
+  store i16 %r3 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 3 ), align 2
+  store i16 %r4 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 4 ), align 2
+  store i16 %r5 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 5 ), align 2
+  store i16 %r6 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 6 ), align 2
+  store i16 %r7 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 7 ), align 2
+  store i16 %r8 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 8 ), align 2
+  store i16 %r9 , ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 9 ), align 2
+  store i16 %r10, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 10), align 2
+  store i16 %r11, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 11), align 2
+  store i16 %r12, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 12), align 2
+  store i16 %r13, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 13), align 2
+  store i16 %r14, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 14), align 2
+  store i16 %r15, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 15), align 2
+  store i16 %r16, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 16), align 2
+  store i16 %r17, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 17), align 2
+  store i16 %r18, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 18), align 2
+  store i16 %r19, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 19), align 2
+  store i16 %r20, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 20), align 2
+  store i16 %r21, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 21), align 2
+  store i16 %r22, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 22), align 2
+  store i16 %r23, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 23), align 2
+  store i16 %r24, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 24), align 2
+  store i16 %r25, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 25), align 2
+  store i16 %r26, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 26), align 2
+  store i16 %r27, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 27), align 2
+  store i16 %r28, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 28), align 2
+  store i16 %r29, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 29), align 2
+  store i16 %r30, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 30), align 2
+  store i16 %r31, ptr getelementptr inbounds ([32 x i16], ptr @d16, i32 0, i64 31), align 2
+  ret void
+}
+
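+; Note: unlike the i32/i64 cases, the i16 checks above and the i8 checks
+; below are vectorized even under the SSE run lines (in <8 x i16> and
+; <16 x i8> parts respectively).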
+define void @fshr_v64i8() {
+; SSE-LABEL: @fshr_v64i8(
+; SSE-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr @a8, align 1
+; SSE-NEXT:    [[TMP2:%.*]] = load <16 x i8>, ptr @b8, align 1
+; SSE-NEXT:    [[TMP3:%.*]] = load <16 x i8>, ptr @c8, align 1
+; SSE-NEXT:    [[TMP4:%.*]] = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]])
+; SSE-NEXT:    [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 16), align 1
+; SSE-NEXT:    [[TMP6:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 16), align 1
+; SSE-NEXT:    [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 16), align 1
+; SSE-NEXT:    [[TMP8:%.*]] = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> [[TMP5]], <16 x i8> [[TMP6]], <16 x i8> [[TMP7]])
+; SSE-NEXT:    [[TMP9:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
+; SSE-NEXT:    [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
+; SSE-NEXT:    [[TMP11:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
+; SSE-NEXT:    [[TMP12:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
+; SSE-NEXT:    [[TMP13:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
+; SSE-NEXT:    [[TMP14:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
+; SSE-NEXT:    [[TMP15:%.*]] = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> [[TMP9]], <16 x i8> [[TMP10]], <16 x i8> [[TMP13]])
+; SSE-NEXT:    [[TMP16:%.*]] = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> [[TMP11]], <16 x i8> [[TMP12]], <16 x i8> [[TMP14]])
+; SSE-NEXT:    store <16 x i8> [[TMP4]], ptr @d8, align 1
+; SSE-NEXT:    store <16 x i8> [[TMP8]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 16), align 1
+; SSE-NEXT:    store <16 x i8> [[TMP15]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
+; SSE-NEXT:    store <16 x i8> [[TMP16]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 48), align 1
+; SSE-NEXT:    ret void
+;
+; AVX-LABEL: @fshr_v64i8(
+; AVX-NEXT:    [[TMP1:%.*]] = load <32 x i8>, ptr @a8, align 1
+; AVX-NEXT:    [[TMP2:%.*]] = load <32 x i8>, ptr @b8, align 1
+; AVX-NEXT:    [[TMP3:%.*]] = load <32 x i8>, ptr @c8, align 1
+; AVX-NEXT:    [[TMP4:%.*]] = call <32 x i8> @llvm.fshr.v32i8(<32 x i8> [[TMP1]], <32 x i8> [[TMP2]], <32 x i8> [[TMP3]])
+; AVX-NEXT:    [[TMP5:%.*]] = load <32 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
+; AVX-NEXT:    [[TMP6:%.*]] = load <32 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
+; AVX-NEXT:    [[TMP7:%.*]] = load <32 x i8>, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
+; AVX-NEXT:    [[TMP8:%.*]] = call <32 x i8> @llvm.fshr.v32i8(<32 x i8> [[TMP5]], <32 x i8> [[TMP6]], <32 x i8> [[TMP7]])
+; AVX-NEXT:    store <32 x i8> [[TMP4]], ptr @d8, align 1
+; AVX-NEXT:    store <32 x i8> [[TMP8]], ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
+; AVX-NEXT:    ret void
+;
+; AVX512-LABEL: @fshr_v64i8(
+; AVX512-NEXT:    [[TMP1:%.*]] = load <64 x i8>, ptr @a8, align 1
+; AVX512-NEXT:    [[TMP2:%.*]] = load <64 x i8>, ptr @b8, align 1
+; AVX512-NEXT:    [[TMP3:%.*]] = load <64 x i8>, ptr @c8, align 1
+; AVX512-NEXT:    [[TMP4:%.*]] = call <64 x i8> @llvm.fshr.v64i8(<64 x i8> [[TMP1]], <64 x i8> [[TMP2]], <64 x i8> [[TMP3]])
+; AVX512-NEXT:    store <64 x i8> [[TMP4]], ptr @d8, align 1
+; AVX512-NEXT:    ret void
+;
+  %a0  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 0 ), align 1
+  %a1  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 1 ), align 1
+  %a2  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 2 ), align 1
+  %a3  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 3 ), align 1
+  %a4  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 4 ), align 1
+  %a5  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 5 ), align 1
+  %a6  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 6 ), align 1
+  %a7  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 7 ), align 1
+  %a8  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 8 ), align 1
+  %a9  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 9 ), align 1
+  %a10 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 10), align 1
+  %a11 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 11), align 1
+  %a12 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 12), align 1
+  %a13 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 13), align 1
+  %a14 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 14), align 1
+  %a15 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 15), align 1
+  %a16 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 16), align 1
+  %a17 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 17), align 1
+  %a18 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 18), align 1
+  %a19 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 19), align 1
+  %a20 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 20), align 1
+  %a21 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 21), align 1
+  %a22 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 22), align 1
+  %a23 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 23), align 1
+  %a24 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 24), align 1
+  %a25 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 25), align 1
+  %a26 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 26), align 1
+  %a27 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 27), align 1
+  %a28 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 28), align 1
+  %a29 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 29), align 1
+  %a30 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 30), align 1
+  %a31 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 31), align 1
+  %a32 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 32), align 1
+  %a33 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 33), align 1
+  %a34 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 34), align 1
+  %a35 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 35), align 1
+  %a36 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 36), align 1
+  %a37 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 37), align 1
+  %a38 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 38), align 1
+  %a39 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 39), align 1
+  %a40 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 40), align 1
+  %a41 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 41), align 1
+  %a42 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 42), align 1
+  %a43 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 43), align 1
+  %a44 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 44), align 1
+  %a45 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 45), align 1
+  %a46 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 46), align 1
+  %a47 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 47), align 1
+  %a48 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 48), align 1
+  %a49 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 49), align 1
+  %a50 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 50), align 1
+  %a51 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 51), align 1
+  %a52 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 52), align 1
+  %a53 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 53), align 1
+  %a54 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 54), align 1
+  %a55 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 55), align 1
+  %a56 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 56), align 1
+  %a57 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 57), align 1
+  %a58 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 58), align 1
+  %a59 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 59), align 1
+  %a60 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 60), align 1
+  %a61 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 61), align 1
+  %a62 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 62), align 1
+  %a63 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @a8, i32 0, i64 63), align 1
+  %b0  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 0 ), align 1
+  %b1  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 1 ), align 1
+  %b2  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 2 ), align 1
+  %b3  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 3 ), align 1
+  %b4  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 4 ), align 1
+  %b5  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 5 ), align 1
+  %b6  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 6 ), align 1
+  %b7  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 7 ), align 1
+  %b8  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 8 ), align 1
+  %b9  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 9 ), align 1
+  %b10 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 10), align 1
+  %b11 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 11), align 1
+  %b12 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 12), align 1
+  %b13 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 13), align 1
+  %b14 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 14), align 1
+  %b15 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 15), align 1
+  %b16 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 16), align 1
+  %b17 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 17), align 1
+  %b18 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 18), align 1
+  %b19 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 19), align 1
+  %b20 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 20), align 1
+  %b21 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 21), align 1
+  %b22 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 22), align 1
+  %b23 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 23), align 1
+  %b24 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 24), align 1
+  %b25 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 25), align 1
+  %b26 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 26), align 1
+  %b27 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 27), align 1
+  %b28 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 28), align 1
+  %b29 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 29), align 1
+  %b30 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 30), align 1
+  %b31 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 31), align 1
+  %b32 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 32), align 1
+  %b33 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 33), align 1
+  %b34 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 34), align 1
+  %b35 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 35), align 1
+  %b36 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 36), align 1
+  %b37 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 37), align 1
+  %b38 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 38), align 1
+  %b39 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 39), align 1
+  %b40 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 40), align 1
+  %b41 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 41), align 1
+  %b42 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 42), align 1
+  %b43 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 43), align 1
+  %b44 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 44), align 1
+  %b45 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 45), align 1
+  %b46 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 46), align 1
+  %b47 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 47), align 1
+  %b48 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 48), align 1
+  %b49 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 49), align 1
+  %b50 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 50), align 1
+  %b51 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 51), align 1
+  %b52 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 52), align 1
+  %b53 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 53), align 1
+  %b54 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 54), align 1
+  %b55 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 55), align 1
+  %b56 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 56), align 1
+  %b57 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 57), align 1
+  %b58 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 58), align 1
+  %b59 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 59), align 1
+  %b60 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 60), align 1
+  %b61 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 61), align 1
+  %b62 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 62), align 1
+  %b63 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @b8, i32 0, i64 63), align 1
+  %c0  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 0 ), align 1
+  %c1  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 1 ), align 1
+  %c2  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 2 ), align 1
+  %c3  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 3 ), align 1
+  %c4  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 4 ), align 1
+  %c5  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 5 ), align 1
+  %c6  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 6 ), align 1
+  %c7  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 7 ), align 1
+  %c8  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 8 ), align 1
+  %c9  = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 9 ), align 1
+  %c10 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 10), align 1
+  %c11 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 11), align 1
+  %c12 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 12), align 1
+  %c13 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 13), align 1
+  %c14 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 14), align 1
+  %c15 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 15), align 1
+  %c16 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 16), align 1
+  %c17 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 17), align 1
+  %c18 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 18), align 1
+  %c19 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 19), align 1
+  %c20 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 20), align 1
+  %c21 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 21), align 1
+  %c22 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 22), align 1
+  %c23 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 23), align 1
+  %c24 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 24), align 1
+  %c25 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 25), align 1
+  %c26 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 26), align 1
+  %c27 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 27), align 1
+  %c28 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 28), align 1
+  %c29 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 29), align 1
+  %c30 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 30), align 1
+  %c31 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 31), align 1
+  %c32 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 32), align 1
+  %c33 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 33), align 1
+  %c34 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 34), align 1
+  %c35 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 35), align 1
+  %c36 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 36), align 1
+  %c37 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 37), align 1
+  %c38 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 38), align 1
+  %c39 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 39), align 1
+  %c40 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 40), align 1
+  %c41 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 41), align 1
+  %c42 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 42), align 1
+  %c43 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 43), align 1
+  %c44 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 44), align 1
+  %c45 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 45), align 1
+  %c46 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 46), align 1
+  %c47 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 47), align 1
+  %c48 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 48), align 1
+  %c49 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 49), align 1
+  %c50 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 50), align 1
+  %c51 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 51), align 1
+  %c52 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 52), align 1
+  %c53 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 53), align 1
+  %c54 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 54), align 1
+  %c55 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 55), align 1
+  %c56 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 56), align 1
+  %c57 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 57), align 1
+  %c58 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 58), align 1
+  %c59 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 59), align 1
+  %c60 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 60), align 1
+  %c61 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 61), align 1
+  %c62 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 62), align 1
+  %c63 = load i8, ptr getelementptr inbounds ([64 x i8], ptr @c8, i32 0, i64 63), align 1
+  %r0  = call i8 @llvm.fshr.i8(i8 %a0 , i8 %b0 , i8 %c0 )
+  %r1  = call i8 @llvm.fshr.i8(i8 %a1 , i8 %b1 , i8 %c1 )
+  %r2  = call i8 @llvm.fshr.i8(i8 %a2 , i8 %b2 , i8 %c2 )
+  %r3  = call i8 @llvm.fshr.i8(i8 %a3 , i8 %b3 , i8 %c3 )
+  %r4  = call i8 @llvm.fshr.i8(i8 %a4 , i8 %b4 , i8 %c4 )
+  %r5  = call i8 @llvm.fshr.i8(i8 %a5 , i8 %b5 , i8 %c5 )
+  %r6  = call i8 @llvm.fshr.i8(i8 %a6 , i8 %b6 , i8 %c6 )
+  %r7  = call i8 @llvm.fshr.i8(i8 %a7 , i8 %b7 , i8 %c7 )
+  %r8  = call i8 @llvm.fshr.i8(i8 %a8 , i8 %b8 , i8 %c8 )
+  %r9  = call i8 @llvm.fshr.i8(i8 %a9 , i8 %b9 , i8 %c9 )
+  %r10 = call i8 @llvm.fshr.i8(i8 %a10, i8 %b10, i8 %c10)
+  %r11 = call i8 @llvm.fshr.i8(i8 %a11, i8 %b11, i8 %c11)
+  %r12 = call i8 @llvm.fshr.i8(i8 %a12, i8 %b12, i8 %c12)
+  %r13 = call i8 @llvm.fshr.i8(i8 %a13, i8 %b13, i8 %c13)
+  %r14 = call i8 @llvm.fshr.i8(i8 %a14, i8 %b14, i8 %c14)
+  %r15 = call i8 @llvm.fshr.i8(i8 %a15, i8 %b15, i8 %c15)
+  %r16 = call i8 @llvm.fshr.i8(i8 %a16, i8 %b16, i8 %c16)
+  %r17 = call i8 @llvm.fshr.i8(i8 %a17, i8 %b17, i8 %c17)
+  %r18 = call i8 @llvm.fshr.i8(i8 %a18, i8 %b18, i8 %c18)
+  %r19 = call i8 @llvm.fshr.i8(i8 %a19, i8 %b19, i8 %c19)
+  %r20 = call i8 @llvm.fshr.i8(i8 %a20, i8 %b20, i8 %c20)
+  %r21 = call i8 @llvm.fshr.i8(i8 %a21, i8 %b21, i8 %c21)
+  %r22 = call i8 @llvm.fshr.i8(i8 %a22, i8 %b22, i8 %c22)
+  %r23 = call i8 @llvm.fshr.i8(i8 %a23, i8 %b23, i8 %c23)
+  %r24 = call i8 @llvm.fshr.i8(i8 %a24, i8 %b24, i8 %c24)
+  %r25 = call i8 @llvm.fshr.i8(i8 %a25, i8 %b25, i8 %c25)
+  %r26 = call i8 @llvm.fshr.i8(i8 %a26, i8 %b26, i8 %c26)
+  %r27 = call i8 @llvm.fshr.i8(i8 %a27, i8 %b27, i8 %c27)
+  %r28 = call i8 @llvm.fshr.i8(i8 %a28, i8 %b28, i8 %c28)
+  %r29 = call i8 @llvm.fshr.i8(i8 %a29, i8 %b29, i8 %c29)
+  %r30 = call i8 @llvm.fshr.i8(i8 %a30, i8 %b30, i8 %c30)
+  %r31 = call i8 @llvm.fshr.i8(i8 %a31, i8 %b31, i8 %c31)
+  %r32 = call i8 @llvm.fshr.i8(i8 %a32, i8 %b32, i8 %c32)
+  %r33 = call i8 @llvm.fshr.i8(i8 %a33, i8 %b33, i8 %c33)
+  %r34 = call i8 @llvm.fshr.i8(i8 %a34, i8 %b34, i8 %c34)
+  %r35 = call i8 @llvm.fshr.i8(i8 %a35, i8 %b35, i8 %c35)
+  %r36 = call i8 @llvm.fshr.i8(i8 %a36, i8 %b36, i8 %c36)
+  %r37 = call i8 @llvm.fshr.i8(i8 %a37, i8 %b37, i8 %c37)
+  %r38 = call i8 @llvm.fshr.i8(i8 %a38, i8 %b38, i8 %c38)
+  %r39 = call i8 @llvm.fshr.i8(i8 %a39, i8 %b39, i8 %c39)
+  %r40 = call i8 @llvm.fshr.i8(i8 %a40, i8 %b40, i8 %c40)
+  %r41 = call i8 @llvm.fshr.i8(i8 %a41, i8 %b41, i8 %c41)
+  %r42 = call i8 @llvm.fshr.i8(i8 %a42, i8 %b42, i8 %c42)
+  %r43 = call i8 @llvm.fshr.i8(i8 %a43, i8 %b43, i8 %c43)
+  %r44 = call i8 @llvm.fshr.i8(i8 %a44, i8 %b44, i8 %c44)
+  %r45 = call i8 @llvm.fshr.i8(i8 %a45, i8 %b45, i8 %c45)
+  %r46 = call i8 @llvm.fshr.i8(i8 %a46, i8 %b46, i8 %c46)
+  %r47 = call i8 @llvm.fshr.i8(i8 %a47, i8 %b47, i8 %c47)
+  %r48 = call i8 @llvm.fshr.i8(i8 %a48, i8 %b48, i8 %c48)
+  %r49 = call i8 @llvm.fshr.i8(i8 %a49, i8 %b49, i8 %c49)
+  %r50 = call i8 @llvm.fshr.i8(i8 %a50, i8 %b50, i8 %c50)
+  %r51 = call i8 @llvm.fshr.i8(i8 %a51, i8 %b51, i8 %c51)
+  %r52 = call i8 @llvm.fshr.i8(i8 %a52, i8 %b52, i8 %c52)
+  %r53 = call i8 @llvm.fshr.i8(i8 %a53, i8 %b53, i8 %c53)
+  %r54 = call i8 @llvm.fshr.i8(i8 %a54, i8 %b54, i8 %c54)
+  %r55 = call i8 @llvm.fshr.i8(i8 %a55, i8 %b55, i8 %c55)
+  %r56 = call i8 @llvm.fshr.i8(i8 %a56, i8 %b56, i8 %c56)
+  %r57 = call i8 @llvm.fshr.i8(i8 %a57, i8 %b57, i8 %c57)
+  %r58 = call i8 @llvm.fshr.i8(i8 %a58, i8 %b58, i8 %c58)
+  %r59 = call i8 @llvm.fshr.i8(i8 %a59, i8 %b59, i8 %c59)
+  %r60 = call i8 @llvm.fshr.i8(i8 %a60, i8 %b60, i8 %c60)
+  %r61 = call i8 @llvm.fshr.i8(i8 %a61, i8 %b61, i8 %c61)
+  %r62 = call i8 @llvm.fshr.i8(i8 %a62, i8 %b62, i8 %c62)
+  %r63 = call i8 @llvm.fshr.i8(i8 %a63, i8 %b63, i8 %c63)
+  store i8 %r0 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 0 ), align 1
+  store i8 %r1 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 1 ), align 1
+  store i8 %r2 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 2 ), align 1
+  store i8 %r3 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 3 ), align 1
+  store i8 %r4 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 4 ), align 1
+  store i8 %r5 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 5 ), align 1
+  store i8 %r6 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 6 ), align 1
+  store i8 %r7 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 7 ), align 1
+  store i8 %r8 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 8 ), align 1
+  store i8 %r9 , ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 9 ), align 1
+  store i8 %r10, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 10), align 1
+  store i8 %r11, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 11), align 1
+  store i8 %r12, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 12), align 1
+  store i8 %r13, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 13), align 1
+  store i8 %r14, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 14), align 1
+  store i8 %r15, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 15), align 1
+  store i8 %r16, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 16), align 1
+  store i8 %r17, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 17), align 1
+  store i8 %r18, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 18), align 1
+  store i8 %r19, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 19), align 1
+  store i8 %r20, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 20), align 1
+  store i8 %r21, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 21), align 1
+  store i8 %r22, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 22), align 1
+  store i8 %r23, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 23), align 1
+  store i8 %r24, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 24), align 1
+  store i8 %r25, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 25), align 1
+  store i8 %r26, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 26), align 1
+  store i8 %r27, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 27), align 1
+  store i8 %r28, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 28), align 1
+  store i8 %r29, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 29), align 1
+  store i8 %r30, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 30), align 1
+  store i8 %r31, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 31), align 1
+  store i8 %r32, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 32), align 1
+  store i8 %r33, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 33), align 1
+  store i8 %r34, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 34), align 1
+  store i8 %r35, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 35), align 1
+  store i8 %r36, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 36), align 1
+  store i8 %r37, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 37), align 1
+  store i8 %r38, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 38), align 1
+  store i8 %r39, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 39), align 1
+  store i8 %r40, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 40), align 1
+  store i8 %r41, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 41), align 1
+  store i8 %r42, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 42), align 1
+  store i8 %r43, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 43), align 1
+  store i8 %r44, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 44), align 1
+  store i8 %r45, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 45), align 1
+  store i8 %r46, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 46), align 1
+  store i8 %r47, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 47), align 1
+  store i8 %r48, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 48), align 1
+  store i8 %r49, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 49), align 1
+  store i8 %r50, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 50), align 1
+  store i8 %r51, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 51), align 1
+  store i8 %r52, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 52), align 1
+  store i8 %r53, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 53), align 1
+  store i8 %r54, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 54), align 1
+  store i8 %r55, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 55), align 1
+  store i8 %r56, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 56), align 1
+  store i8 %r57, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 57), align 1
+  store i8 %r58, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 58), align 1
+  store i8 %r59, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 59), align 1
+  store i8 %r60, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 60), align 1
+  store i8 %r61, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 61), align 1
+  store i8 %r62, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 62), align 1
+  store i8 %r63, ptr getelementptr inbounds ([64 x i8], ptr @d8, i32 0, i64 63), align 1
+  ret void
+}
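
For reference, the scalar i8 pattern above is exactly the shape the SLP vectorizer is meant to collapse into wide loads feeding a single vector funnel-shift intrinsic. Below is a minimal sketch of that ideal vector form (illustrative only, not part of this commit; it assumes a target where a <64 x i8> funnel shift is deemed profitable, e.g. AVX512BW, and real output may instead use narrower vectors):

  declare <64 x i8> @llvm.fshr.v64i8(<64 x i8>, <64 x i8>, <64 x i8>)

  ; one wide load per operand array instead of 64 scalar loads
  %a = load <64 x i8>, ptr @a8, align 64
  %b = load <64 x i8>, ptr @b8, align 64
  %c = load <64 x i8>, ptr @c8, align 64
  ; a single vector funnel-shift-right replaces the 64 scalar calls
  %r = call <64 x i8> @llvm.fshr.v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c)
  store <64 x i8> %r, ptr @d8, align 64

On SSE/AVX targets the same rewrite would apply at <16 x i8> or <32 x i8> granularity; the per-prefix FileCheck lines in these tests are what record whether the vectorizer actually forms such calls on each target.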
