[llvm] a10ac65 - [AArch64] Extend load insert into zero patterns to SVE.

David Green via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 6 15:26:14 PST 2023


Author: David Green
Date: 2023-03-06T23:26:08Z
New Revision: a10ac6554db4dee93232139d5c29f8d91ee01f3b

URL: https://github.com/llvm/llvm-project/commit/a10ac6554db4dee93232139d5c29f8d91ee01f3b
DIFF: https://github.com/llvm/llvm-project/commit/a10ac6554db4dee93232139d5c29f8d91ee01f3b.diff

LOG: [AArch64] Extend load insert into zero patterns to SVE.

This extends the patterns from D144086, for loading into the zeroth lane of a
zero vector, to SVE; they work in the same way as the existing fixed-length
patterns. Only full-length vectors are handled here, not the narrower
floating-point vector types.
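
As a minimal illustration, taken from the updated load-insert-zero.ll test
below, inserting a scalar load into lane 0 of a zeroed scalable vector can now
be selected as a single scalar FP/SIMD load, since the load implicitly zeroes
the rest of the register:

  define <vscale x 16 x i8> @loadnxv16i8(ptr %p) {
    %l = load i8, ptr %p
    %v = insertelement <vscale x 16 x i8> zeroinitializer, i8 %l, i32 0
    ret <vscale x 16 x i8> %v
  }

  ; With +sve this now selects to a single load:
  ;   ldr b0, [x0]
  ;   ret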

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64InstrInfo.td
    llvm/test/CodeGen/AArch64/load-insert-zero.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index c2ab263551b8..c3fa7df8d2d6 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -3326,8 +3326,8 @@ def : InstAlias<"ldrsw $Rt, [$Rn, $offset]",
 
 // A LDR will implicitly zero the rest of the vector, so vector_insert(zeros,
 // load, 0) can use a single load.
-multiclass LoadInsertZeroPatterns<SDPatternOperator LoadOp, ValueType VT, ValueType HVT, ValueType ScalarVT,
-                                  Instruction LoadInst, Instruction UnscaledLoadInst,
+multiclass LoadInsertZeroPatterns<SDPatternOperator LoadOp, ValueType VT, ValueType HVT, ValueType SVT,
+                                  ValueType ScalarVT, Instruction LoadInst, Instruction UnscaledLoadInst,
                                   ComplexPattern Addr, ComplexPattern UnscaledAddr, Operand AddrImm,
                                   SubRegIndex SubReg> {
   // Scaled
@@ -3347,23 +3347,32 @@ multiclass LoadInsertZeroPatterns<SDPatternOperator LoadOp, ValueType VT, ValueT
   def : Pat <(vector_insert (HVT immAllZerosV),
                  (ScalarVT (LoadOp (UnscaledAddr GPR64sp:$Rn, simm9:$offset))), (i64 0)),
              (SUBREG_TO_REG (i64 0), (UnscaledLoadInst GPR64sp:$Rn, simm9:$offset), SubReg)>;
+
+  // SVE patterns
+  def : Pat <(vector_insert (SVT immAllZerosV),
+                 (ScalarVT (LoadOp (Addr GPR64sp:$Rn, AddrImm:$offset))), (i64 0)),
+             (SUBREG_TO_REG (i64 0), (LoadInst GPR64sp:$Rn, AddrImm:$offset), SubReg)>;
+  // Unscaled
+  def : Pat <(vector_insert (SVT immAllZerosV),
+                 (ScalarVT (LoadOp (UnscaledAddr GPR64sp:$Rn, simm9:$offset))), (i64 0)),
+             (SUBREG_TO_REG (i64 0), (UnscaledLoadInst GPR64sp:$Rn, simm9:$offset), SubReg)>;
 }
 
-defm : LoadInsertZeroPatterns<extloadi8,  v16i8,  v8i8,   i32,  LDRBui, LDRBui,
+defm : LoadInsertZeroPatterns<extloadi8,  v16i8,  v8i8,   nxv16i8,  i32,  LDRBui, LDRBui,
                               am_indexed8,  am_unscaled8,  uimm12s1, bsub>;
-defm : LoadInsertZeroPatterns<extloadi16, v8i16,  v4i16,  i32,  LDRHui, LDURHi,
+defm : LoadInsertZeroPatterns<extloadi16, v8i16,  v4i16,  nxv8i16,  i32,  LDRHui, LDURHi,
                               am_indexed16, am_unscaled16, uimm12s2, hsub>;
-defm : LoadInsertZeroPatterns<load,       v4i32,  v2i32,  i32,  LDRSui, LDURSi,
+defm : LoadInsertZeroPatterns<load,       v4i32,  v2i32,  nxv4i32,  i32,  LDRSui, LDURSi,
                               am_indexed32, am_unscaled32, uimm12s4, ssub>;
-defm : LoadInsertZeroPatterns<load,       v2i64,  v1i64,  i64,  LDRDui, LDURDi,
+defm : LoadInsertZeroPatterns<load,       v2i64,  v1i64,  nxv2i64,  i64,  LDRDui, LDURDi,
                               am_indexed64, am_unscaled64, uimm12s8, dsub>;
-defm : LoadInsertZeroPatterns<load,       v8f16,  v4f16,  f16,  LDRHui, LDURHi,
+defm : LoadInsertZeroPatterns<load,       v8f16,  v4f16,  nxv8f16,  f16,  LDRHui, LDURHi,
                               am_indexed16, am_unscaled16, uimm12s2, hsub>;
-defm : LoadInsertZeroPatterns<load,       v8bf16, v4bf16, bf16, LDRHui, LDURHi,
+defm : LoadInsertZeroPatterns<load,       v8bf16, v4bf16, nxv8bf16, bf16, LDRHui, LDURHi,
                               am_indexed16, am_unscaled16, uimm12s2, hsub>;
-defm : LoadInsertZeroPatterns<load,       v4f32,  v2f32,  f32,  LDRSui, LDURSi,
+defm : LoadInsertZeroPatterns<load,       v4f32,  v2f32,  nxv4f32,  f32,  LDRSui, LDURSi,
                               am_indexed32, am_unscaled32, uimm12s4, ssub>;
-defm : LoadInsertZeroPatterns<load,       v2f64,  v1f64,  f64,  LDRDui, LDURDi,
+defm : LoadInsertZeroPatterns<load,       v2f64,  v1f64,  nxv2f64,  f64,  LDRDui, LDURDi,
                               am_indexed64, am_unscaled64, uimm12s8, dsub>;
 
 // Pre-fetch.

diff --git a/llvm/test/CodeGen/AArch64/load-insert-zero.ll b/llvm/test/CodeGen/AArch64/load-insert-zero.ll
index 79eaf2c5b07f..1adbe69c76f9 100644
--- a/llvm/test/CodeGen/AArch64/load-insert-zero.ll
+++ b/llvm/test/CodeGen/AArch64/load-insert-zero.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-none-eabi -mattr=+fullfp16,+bf16 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-none-eabi -mattr=+fullfp16,+bf16,+sve | FileCheck %s
 
 define <8 x i8> @loadv8i8(ptr %p) {
 ; CHECK-LABEL: loadv8i8:
@@ -448,5 +448,366 @@ define void @predictor_4x4_neon_new(ptr nocapture noundef writeonly %0, i64 noun
   ret void
 }
 
+
+define <vscale x 8 x i8> @loadnxv8i8(ptr %p) {
+; CHECK-LABEL: loadnxv8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldrb w8, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl1
+; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    mov z0.h, p0/m, w8
+; CHECK-NEXT:    ret
+  %l = load i8, ptr %p
+  %v = insertelement <vscale x 8 x i8> zeroinitializer, i8 %l, i32 0
+  ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 16 x i8> @loadnxv16i8(ptr %p) {
+; CHECK-LABEL: loadnxv16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr b0, [x0]
+; CHECK-NEXT:    ret
+  %l = load i8, ptr %p
+  %v = insertelement <vscale x 16 x i8> zeroinitializer, i8 %l, i32 0
+  ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 4 x i16> @loadnxv4i16(ptr %p) {
+; CHECK-LABEL: loadnxv4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldrh w8, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl1
+; CHECK-NEXT:    mov z0.s, #0 // =0x0
+; CHECK-NEXT:    mov z0.s, p0/m, w8
+; CHECK-NEXT:    ret
+  %l = load i16, ptr %p
+  %v = insertelement <vscale x 4 x i16> zeroinitializer, i16 %l, i32 0
+  ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 8 x i16> @loadnxv8i16(ptr %p) {
+; CHECK-LABEL: loadnxv8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr h0, [x0]
+; CHECK-NEXT:    ret
+  %l = load i16, ptr %p
+  %v = insertelement <vscale x 8 x i16> zeroinitializer, i16 %l, i32 0
+  ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 2 x i32> @loadnxv2i32(ptr %p) {
+; CHECK-LABEL: loadnxv2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    mov z0.d, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, p0/m, x8
+; CHECK-NEXT:    ret
+  %l = load i32, ptr %p
+  %v = insertelement <vscale x 2 x i32> zeroinitializer, i32 %l, i32 0
+  ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 4 x i32> @loadnxv4i32(ptr %p) {
+; CHECK-LABEL: loadnxv4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr s0, [x0]
+; CHECK-NEXT:    ret
+  %l = load i32, ptr %p
+  %v = insertelement <vscale x 4 x i32> zeroinitializer, i32 %l, i32 0
+  ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 2 x i64> @loadnxv2i64(ptr %p) {
+; CHECK-LABEL: loadnxv2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ret
+  %l = load i64, ptr %p
+  %v = insertelement <vscale x 2 x i64> zeroinitializer, i64 %l, i32 0
+  ret <vscale x 2 x i64> %v
+}
+
+
+define <vscale x 4 x half> @loadnxv4f16(ptr %p) {
+; CHECK-LABEL: loadnxv4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, wzr
+; CHECK-NEXT:    ldr h1, [x0]
+; CHECK-NEXT:    index z0.s, #0, #1
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    mov z2.s, w8
+; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, z2.s
+; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    mov z0.h, p0/m, h1
+; CHECK-NEXT:    ret
+  %l = load half, ptr %p
+  %v = insertelement <vscale x 4 x half> zeroinitializer, half %l, i32 0
+  ret <vscale x 4 x half> %v
+}
+
+define <vscale x 8 x half> @loadnxv8f16(ptr %p) {
+; CHECK-LABEL: loadnxv8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr h0, [x0]
+; CHECK-NEXT:    ret
+  %l = load half, ptr %p
+  %v = insertelement <vscale x 8 x half> zeroinitializer, half %l, i32 0
+  ret <vscale x 8 x half> %v
+}
+
+define <vscale x 4 x bfloat> @loadnxv4bf16(ptr %p) {
+; CHECK-LABEL: loadnxv4bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, wzr
+; CHECK-NEXT:    ldr h1, [x0]
+; CHECK-NEXT:    index z0.s, #0, #1
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    mov z2.s, w8
+; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, z2.s
+; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    mov z0.h, p0/m, h1
+; CHECK-NEXT:    ret
+  %l = load bfloat, ptr %p
+  %v = insertelement <vscale x 4 x bfloat> zeroinitializer, bfloat %l, i32 0
+  ret <vscale x 4 x bfloat> %v
+}
+
+define <vscale x 8 x bfloat> @loadnxv8bf16(ptr %p) {
+; CHECK-LABEL: loadnxv8bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr h0, [x0]
+; CHECK-NEXT:    ret
+  %l = load bfloat, ptr %p
+  %v = insertelement <vscale x 8 x bfloat> zeroinitializer, bfloat %l, i32 0
+  ret <vscale x 8 x bfloat> %v
+}
+
+define <vscale x 2 x float> @loadnxv2f32(ptr %p) {
+; CHECK-LABEL: loadnxv2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x8, xzr
+; CHECK-NEXT:    ldr s1, [x0]
+; CHECK-NEXT:    index z0.d, #0, #1
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    mov z2.d, x8
+; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, z2.d
+; CHECK-NEXT:    mov z0.s, #0 // =0x0
+; CHECK-NEXT:    mov z0.s, p0/m, s1
+; CHECK-NEXT:    ret
+  %l = load float, ptr %p
+  %v = insertelement <vscale x 2 x float> zeroinitializer, float %l, i32 0
+  ret <vscale x 2 x float> %v
+}
+
+define <vscale x 4 x float> @loadnxv4f32(ptr %p) {
+; CHECK-LABEL: loadnxv4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr s0, [x0]
+; CHECK-NEXT:    ret
+  %l = load float, ptr %p
+  %v = insertelement <vscale x 4 x float> zeroinitializer, float %l, i32 0
+  ret <vscale x 4 x float> %v
+}
+
+define <vscale x 2 x double> @loadnxv2f64(ptr %p) {
+; CHECK-LABEL: loadnxv2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ret
+  %l = load double, ptr %p
+  %v = insertelement <vscale x 2 x double> zeroinitializer, double %l, i32 0
+  ret <vscale x 2 x double> %v
+}
+
+
+; Unscaled
+
+define <vscale x 8 x i8> @loadnxv8i8_offset(ptr %p) {
+; CHECK-LABEL: loadnxv8i8_offset:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldrb w8, [x0, #1]
+; CHECK-NEXT:    ptrue p0.h, vl1
+; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    mov z0.h, p0/m, w8
+; CHECK-NEXT:    ret
+  %g = getelementptr inbounds i8, ptr %p, i64 1
+  %l = load i8, ptr %g
+  %v = insertelement <vscale x 8 x i8> zeroinitializer, i8 %l, i32 0
+  ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 16 x i8> @loadnxv16i8_offset(ptr %p) {
+; CHECK-LABEL: loadnxv16i8_offset:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr b0, [x0, #1]
+; CHECK-NEXT:    ret
+  %g = getelementptr inbounds i8, ptr %p, i64 1
+  %l = load i8, ptr %g
+  %v = insertelement <vscale x 16 x i8> zeroinitializer, i8 %l, i32 0
+  ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 4 x i16> @loadnxv4i16_offset(ptr %p) {
+; CHECK-LABEL: loadnxv4i16_offset:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldurh w8, [x0, #1]
+; CHECK-NEXT:    ptrue p0.s, vl1
+; CHECK-NEXT:    mov z0.s, #0 // =0x0
+; CHECK-NEXT:    mov z0.s, p0/m, w8
+; CHECK-NEXT:    ret
+  %g = getelementptr inbounds i8, ptr %p, i64 1
+  %l = load i16, ptr %g
+  %v = insertelement <vscale x 4 x i16> zeroinitializer, i16 %l, i32 0
+  ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 8 x i16> @loadnxv8i16_offset(ptr %p) {
+; CHECK-LABEL: loadnxv8i16_offset:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldur h0, [x0, #1]
+; CHECK-NEXT:    ret
+  %g = getelementptr inbounds i8, ptr %p, i64 1
+  %l = load i16, ptr %g
+  %v = insertelement <vscale x 8 x i16> zeroinitializer, i16 %l, i32 0
+  ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 2 x i32> @loadnxv2i32_offset(ptr %p) {
+; CHECK-LABEL: loadnxv2i32_offset:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldur w8, [x0, #1]
+; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    mov z0.d, #0 // =0x0
+; CHECK-NEXT:    mov z0.d, p0/m, x8
+; CHECK-NEXT:    ret
+  %g = getelementptr inbounds i8, ptr %p, i64 1
+  %l = load i32, ptr %g
+  %v = insertelement <vscale x 2 x i32> zeroinitializer, i32 %l, i32 0
+  ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 4 x i32> @loadnxv4i32_offset(ptr %p) {
+; CHECK-LABEL: loadnxv4i32_offset:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldur s0, [x0, #1]
+; CHECK-NEXT:    ret
+  %g = getelementptr inbounds i8, ptr %p, i64 1
+  %l = load i32, ptr %g
+  %v = insertelement <vscale x 4 x i32> zeroinitializer, i32 %l, i32 0
+  ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 2 x i64> @loadnxv2i64_offset(ptr %p) {
+; CHECK-LABEL: loadnxv2i64_offset:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldur d0, [x0, #1]
+; CHECK-NEXT:    ret
+  %g = getelementptr inbounds i8, ptr %p, i64 1
+  %l = load i64, ptr %g
+  %v = insertelement <vscale x 2 x i64> zeroinitializer, i64 %l, i32 0
+  ret <vscale x 2 x i64> %v
+}
+
+
+define <vscale x 4 x half> @loadnxv4f16_offset(ptr %p) {
+; CHECK-LABEL: loadnxv4f16_offset:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, wzr
+; CHECK-NEXT:    ldur h1, [x0, #1]
+; CHECK-NEXT:    index z0.s, #0, #1
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    mov z2.s, w8
+; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, z2.s
+; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    mov z0.h, p0/m, h1
+; CHECK-NEXT:    ret
+  %g = getelementptr inbounds i8, ptr %p, i64 1
+  %l = load half, ptr %g
+  %v = insertelement <vscale x 4 x half> zeroinitializer, half %l, i32 0
+  ret <vscale x 4 x half> %v
+}
+
+define <vscale x 8 x half> @loadnxv8f16_offset(ptr %p) {
+; CHECK-LABEL: loadnxv8f16_offset:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldur h0, [x0, #1]
+; CHECK-NEXT:    ret
+  %g = getelementptr inbounds i8, ptr %p, i64 1
+  %l = load half, ptr %g
+  %v = insertelement <vscale x 8 x half> zeroinitializer, half %l, i32 0
+  ret <vscale x 8 x half> %v
+}
+
+define <vscale x 4 x bfloat> @loadnxv4bf16_offset(ptr %p) {
+; CHECK-LABEL: loadnxv4bf16_offset:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, wzr
+; CHECK-NEXT:    ldur h1, [x0, #1]
+; CHECK-NEXT:    index z0.s, #0, #1
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    mov z2.s, w8
+; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, z2.s
+; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    mov z0.h, p0/m, h1
+; CHECK-NEXT:    ret
+  %g = getelementptr inbounds i8, ptr %p, i64 1
+  %l = load bfloat, ptr %g
+  %v = insertelement <vscale x 4 x bfloat> zeroinitializer, bfloat %l, i32 0
+  ret <vscale x 4 x bfloat> %v
+}
+
+define <vscale x 8 x bfloat> @loadnxv8bf16_offset(ptr %p) {
+; CHECK-LABEL: loadnxv8bf16_offset:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldur h0, [x0, #1]
+; CHECK-NEXT:    ret
+  %g = getelementptr inbounds i8, ptr %p, i64 1
+  %l = load bfloat, ptr %g
+  %v = insertelement <vscale x 8 x bfloat> zeroinitializer, bfloat %l, i32 0
+  ret <vscale x 8 x bfloat> %v
+}
+
+define <vscale x 2 x float> @loadnxv2f32_offset(ptr %p) {
+; CHECK-LABEL: loadnxv2f32_offset:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x8, xzr
+; CHECK-NEXT:    ldur s1, [x0, #1]
+; CHECK-NEXT:    index z0.d, #0, #1
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    mov z2.d, x8
+; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, z2.d
+; CHECK-NEXT:    mov z0.s, #0 // =0x0
+; CHECK-NEXT:    mov z0.s, p0/m, s1
+; CHECK-NEXT:    ret
+  %g = getelementptr inbounds i8, ptr %p, i64 1
+  %l = load float, ptr %g
+  %v = insertelement <vscale x 2 x float> zeroinitializer, float %l, i32 0
+  ret <vscale x 2 x float> %v
+}
+
+define <vscale x 4 x float> @loadnxv4f32_offset(ptr %p) {
+; CHECK-LABEL: loadnxv4f32_offset:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldur s0, [x0, #1]
+; CHECK-NEXT:    ret
+  %g = getelementptr inbounds i8, ptr %p, i64 1
+  %l = load float, ptr %g
+  %v = insertelement <vscale x 4 x float> zeroinitializer, float %l, i32 0
+  ret <vscale x 4 x float> %v
+}
+
+define <vscale x 2 x double> @loadnxv2f64_offset(ptr %p) {
+; CHECK-LABEL: loadnxv2f64_offset:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldur d0, [x0, #1]
+; CHECK-NEXT:    ret
+  %g = getelementptr inbounds i8, ptr %p, i64 1
+  %l = load double, ptr %g
+  %v = insertelement <vscale x 2 x double> zeroinitializer, double %l, i32 0
+  ret <vscale x 2 x double> %v
+}
+
+
 declare <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16>, i32) #1
 declare <8 x i8> @llvm.aarch64.neon.urhadd.v8i8(<8 x i8>, <8 x i8>) #1


        

