[llvm] cmse mitigation (PR #89944)

via llvm-commits llvm-commits@lists.llvm.org
Wed Apr 24 08:59:57 PDT 2024


llvmbot wrote:



@llvm/pr-subscribers-backend-arm

Author: Lucas Duarte Prates (pratlucas)

Changes:

- [ARM] CMSE security mitigation on function arguments
- [ARM] CMSE security mitigation on returned values
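
These mitigations cover both directions of the secure/non-secure boundary: values returned through `cmse_nonsecure_call` function pointers and integer arguments received by `cmse_nonsecure_entry` functions. As a rough illustration of the first case, here is a C-level sketch of the `access_i16` test included below (my own reconstruction, not source taken from the patch; the names `get_idx` and `arr` mirror the test, and it assumes an ARMv8-M target built with `-mcmse`):

```c
/* Sketch of the scenario hardened here: the secure side calls through a
 * non-secure function pointer and uses the narrow return value as an
 * array index.  The AAPCS requires the callee to extend the i16 result
 * to 32 bits in r0, but a non-secure callee cannot be trusted to do so;
 * with this patch the secure caller re-extends the value itself (the
 * extra sxth visible in the access_i16 checks). */
typedef short (*ns_get_idx_t)(void) __attribute__((cmse_nonsecure_call));

ns_get_idx_t get_idx;   /* installed elsewhere; points at non-secure code */
int arr[256];

int access_i16(void) {
  short idx = get_idx();   /* non-secure call: only the low 16 bits of r0 are trusted */
  return arr[idx];         /* the indexed load happens after the re-extension */
}
```

The returned-values test file in the diff below exercises this pattern for signed and unsigned i16, i8, i1, and odd-width (i5) results.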


---

Patch is 28.48 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/89944.diff


4 Files Affected:

- (modified) llvm/lib/Target/ARM/ARMISelLowering.cpp (+32-4) 
- (modified) llvm/lib/Target/ARM/ARMISelLowering.h (+1-1) 
- (added) llvm/test/CodeGen/ARM/cmse-harden-call-returned-values.ll (+449) 
- (added) llvm/test/CodeGen/ARM/cmse-harden-entry-arguments.ll (+235) 


``````````diff
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index d0e9f61c0bd122..f1594071cdfe50 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -156,6 +156,17 @@ static const MCPhysReg GPRArgRegs[] = {
   ARM::R0, ARM::R1, ARM::R2, ARM::R3
 };
 
+static SDValue handleCMSEValue(const SDValue &Value, const ISD::InputArg &Arg,
+                               SelectionDAG &DAG, const SDLoc &DL, EVT RegVT) {
+  assert(Arg.ArgVT.isScalarInteger() && RegVT.isScalarInteger());
+  assert(Arg.ArgVT.bitsLT(RegVT));
+  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, Arg.ArgVT, Value);
+  SDValue Ext =
+      DAG.getNode(Arg.Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL,
+                  RegVT, Trunc);
+  return Ext;
+}
+
 void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT) {
   if (VT != PromotedLdStVT) {
     setOperationAction(ISD::LOAD, VT, Promote);
@@ -2196,7 +2207,7 @@ SDValue ARMTargetLowering::LowerCallResult(
     SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg,
     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
-    SDValue ThisVal) const {
+    SDValue ThisVal, bool isCmseNSCall) const {
   // Assign locations to each value returned by this call.
   SmallVector<CCValAssign, 16> RVLocs;
   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
@@ -2274,6 +2285,15 @@ SDValue ARMTargetLowering::LowerCallResult(
         (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16))
       Val = MoveToHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Val);
 
+    // On CMSE Non-secure Calls, call results (returned values) whose bitwidth
+    // is less than 32 bits must be sign- or zero-extended after the call for
+    // security reasons. Although the ABI mandates an extension done by the
+    // callee, the latter cannot be trusted to follow the rules of the ABI.
+    const ISD::InputArg &Arg = Ins[VA.getValNo()];
+    if (isCmseNSCall && Arg.ArgVT.isScalarInteger() &&
+        VA.getLocVT().isScalarInteger() && Arg.ArgVT.bitsLT(VA.getLocVT()))
+      Val = handleCMSEValue(Val, Arg, DAG, dl, VA.getLocVT());
+
     InVals.push_back(Val);
   }
 
@@ -2886,7 +2906,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   // return.
   return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, dl, DAG,
                          InVals, isThisReturn,
-                         isThisReturn ? OutVals[0] : SDValue());
+                         isThisReturn ? OutVals[0] : SDValue(), isCmseNSCall);
 }
 
 /// HandleByVal - Every parameter *after* a byval parameter is passed
@@ -4479,8 +4499,6 @@ SDValue ARMTargetLowering::LowerFormalArguments(
                  *DAG.getContext());
   CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, isVarArg));
 
-  SmallVector<SDValue, 16> ArgValues;
-  SDValue ArgValue;
   Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin();
   unsigned CurArgIdx = 0;
 
@@ -4535,6 +4553,7 @@ SDValue ARMTargetLowering::LowerFormalArguments(
     // Arguments stored in registers.
     if (VA.isRegLoc()) {
       EVT RegVT = VA.getLocVT();
+      SDValue ArgValue;
 
       if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) {
         // f64 and vector types are split up into multiple registers or
@@ -4617,6 +4636,15 @@ SDValue ARMTargetLowering::LowerFormalArguments(
           (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16))
         ArgValue = MoveToHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), ArgValue);
 
+      // On CMSE Entry Functions, formal integer arguments whose bitwidth is
+      // less than 32 bits must be sign- or zero-extended in the callee for
+      // security reasons. Although the ABI mandates an extension done by the
+      // caller, the latter cannot be trusted to follow the rules of the ABI.
+      const ISD::InputArg &Arg = Ins[VA.getValNo()];
+      if (AFI->isCmseNSEntryFunction() && Arg.ArgVT.isScalarInteger() &&
+          RegVT.isScalarInteger() && Arg.ArgVT.bitsLT(RegVT))
+        ArgValue = handleCMSEValue(ArgValue, Arg, DAG, dl, RegVT);
+
       InVals.push_back(ArgValue);
     } else { // VA.isRegLoc()
       // Only arguments passed on the stack should make it here.
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 26ef295e3d3fc3..c9371c128cb7d7 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -895,7 +895,7 @@ class VectorType;
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             const SDLoc &dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
-                            SDValue ThisVal) const;
+                            SDValue ThisVal, bool isCmseNSCall) const;
 
     bool supportSplitCSR(MachineFunction *MF) const override {
       return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
diff --git a/llvm/test/CodeGen/ARM/cmse-harden-call-returned-values.ll b/llvm/test/CodeGen/ARM/cmse-harden-call-returned-values.ll
new file mode 100644
index 00000000000000..75f067adbf115b
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/cmse-harden-call-returned-values.ll
@@ -0,0 +1,449 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc %s -mtriple=thumbv8m.main     -o - | FileCheck %s --check-prefix V8M
+; RUN: llc %s -mtriple=thumbebv8m.main   -o - | FileCheck %s --check-prefix V8M
+; RUN: llc %s -mtriple=thumbv8.1m.main   -o - | FileCheck %s --check-prefix V81M
+; RUN: llc %s -mtriple=thumbebv8.1m.main -o - | FileCheck %s --check-prefix V81M
+
+@get_idx = hidden local_unnamed_addr global ptr null, align 4
+@arr = hidden local_unnamed_addr global [256 x i32] zeroinitializer, align 4
+
+define i32 @access_i16() {
+; V8M-LABEL: access_i16:
+; V8M:       @ %bb.0: @ %entry
+; V8M-NEXT:    push {r7, lr}
+; V8M-NEXT:    movw r0, :lower16:get_idx
+; V8M-NEXT:    movt r0, :upper16:get_idx
+; V8M-NEXT:    ldr r0, [r0]
+; V8M-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-NEXT:    bic r0, r0, #1
+; V8M-NEXT:    sub sp, #136
+; V8M-NEXT:    vlstm sp
+; V8M-NEXT:    mov r1, r0
+; V8M-NEXT:    mov r2, r0
+; V8M-NEXT:    mov r3, r0
+; V8M-NEXT:    mov r4, r0
+; V8M-NEXT:    mov r5, r0
+; V8M-NEXT:    mov r6, r0
+; V8M-NEXT:    mov r7, r0
+; V8M-NEXT:    mov r8, r0
+; V8M-NEXT:    mov r9, r0
+; V8M-NEXT:    mov r10, r0
+; V8M-NEXT:    mov r11, r0
+; V8M-NEXT:    mov r12, r0
+; V8M-NEXT:    msr apsr_nzcvq, r0
+; V8M-NEXT:    blxns r0
+; V8M-NEXT:    vlldm sp
+; V8M-NEXT:    add sp, #136
+; V8M-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-NEXT:    movw r1, :lower16:arr
+; V8M-NEXT:    sxth r0, r0
+; V8M-NEXT:    movt r1, :upper16:arr
+; V8M-NEXT:    ldr.w r0, [r1, r0, lsl #2]
+; V8M-NEXT:    pop {r7, pc}
+;
+; V81M-LABEL: access_i16:
+; V81M:       @ %bb.0: @ %entry
+; V81M-NEXT:    push {r7, lr}
+; V81M-NEXT:    movw r0, :lower16:get_idx
+; V81M-NEXT:    movt r0, :upper16:get_idx
+; V81M-NEXT:    ldr r0, [r0]
+; V81M-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-NEXT:    bic r0, r0, #1
+; V81M-NEXT:    sub sp, #136
+; V81M-NEXT:    vlstm sp
+; V81M-NEXT:    clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
+; V81M-NEXT:    blxns r0
+; V81M-NEXT:    vlldm sp
+; V81M-NEXT:    add sp, #136
+; V81M-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-NEXT:    movw r1, :lower16:arr
+; V81M-NEXT:    sxth r0, r0
+; V81M-NEXT:    movt r1, :upper16:arr
+; V81M-NEXT:    ldr.w r0, [r1, r0, lsl #2]
+; V81M-NEXT:    pop {r7, pc}
+entry:
+  %0 = load ptr, ptr @get_idx, align 4
+  %call = tail call signext i16 %0() "cmse_nonsecure_call"
+  %idxprom = sext i16 %call to i32
+  %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
+  %1 = load i32, ptr %arrayidx, align 4
+  ret i32 %1
+}
+
+define i32 @access_u16() {
+; V8M-LABEL: access_u16:
+; V8M:       @ %bb.0: @ %entry
+; V8M-NEXT:    push {r7, lr}
+; V8M-NEXT:    movw r0, :lower16:get_idx
+; V8M-NEXT:    movt r0, :upper16:get_idx
+; V8M-NEXT:    ldr r0, [r0]
+; V8M-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-NEXT:    bic r0, r0, #1
+; V8M-NEXT:    sub sp, #136
+; V8M-NEXT:    vlstm sp
+; V8M-NEXT:    mov r1, r0
+; V8M-NEXT:    mov r2, r0
+; V8M-NEXT:    mov r3, r0
+; V8M-NEXT:    mov r4, r0
+; V8M-NEXT:    mov r5, r0
+; V8M-NEXT:    mov r6, r0
+; V8M-NEXT:    mov r7, r0
+; V8M-NEXT:    mov r8, r0
+; V8M-NEXT:    mov r9, r0
+; V8M-NEXT:    mov r10, r0
+; V8M-NEXT:    mov r11, r0
+; V8M-NEXT:    mov r12, r0
+; V8M-NEXT:    msr apsr_nzcvq, r0
+; V8M-NEXT:    blxns r0
+; V8M-NEXT:    vlldm sp
+; V8M-NEXT:    add sp, #136
+; V8M-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-NEXT:    movw r1, :lower16:arr
+; V8M-NEXT:    uxth r0, r0
+; V8M-NEXT:    movt r1, :upper16:arr
+; V8M-NEXT:    ldr.w r0, [r1, r0, lsl #2]
+; V8M-NEXT:    pop {r7, pc}
+;
+; V81M-LABEL: access_u16:
+; V81M:       @ %bb.0: @ %entry
+; V81M-NEXT:    push {r7, lr}
+; V81M-NEXT:    movw r0, :lower16:get_idx
+; V81M-NEXT:    movt r0, :upper16:get_idx
+; V81M-NEXT:    ldr r0, [r0]
+; V81M-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-NEXT:    bic r0, r0, #1
+; V81M-NEXT:    sub sp, #136
+; V81M-NEXT:    vlstm sp
+; V81M-NEXT:    clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
+; V81M-NEXT:    blxns r0
+; V81M-NEXT:    vlldm sp
+; V81M-NEXT:    add sp, #136
+; V81M-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-NEXT:    movw r1, :lower16:arr
+; V81M-NEXT:    uxth r0, r0
+; V81M-NEXT:    movt r1, :upper16:arr
+; V81M-NEXT:    ldr.w r0, [r1, r0, lsl #2]
+; V81M-NEXT:    pop {r7, pc}
+entry:
+  %0 = load ptr, ptr @get_idx, align 4
+  %call = tail call zeroext i16 %0() "cmse_nonsecure_call"
+  %idxprom = zext i16 %call to i32
+  %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
+  %1 = load i32, ptr %arrayidx, align 4
+  ret i32 %1
+}
+
+define i32 @access_i8() {
+; V8M-LABEL: access_i8:
+; V8M:       @ %bb.0: @ %entry
+; V8M-NEXT:    push {r7, lr}
+; V8M-NEXT:    movw r0, :lower16:get_idx
+; V8M-NEXT:    movt r0, :upper16:get_idx
+; V8M-NEXT:    ldr r0, [r0]
+; V8M-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-NEXT:    bic r0, r0, #1
+; V8M-NEXT:    sub sp, #136
+; V8M-NEXT:    vlstm sp
+; V8M-NEXT:    mov r1, r0
+; V8M-NEXT:    mov r2, r0
+; V8M-NEXT:    mov r3, r0
+; V8M-NEXT:    mov r4, r0
+; V8M-NEXT:    mov r5, r0
+; V8M-NEXT:    mov r6, r0
+; V8M-NEXT:    mov r7, r0
+; V8M-NEXT:    mov r8, r0
+; V8M-NEXT:    mov r9, r0
+; V8M-NEXT:    mov r10, r0
+; V8M-NEXT:    mov r11, r0
+; V8M-NEXT:    mov r12, r0
+; V8M-NEXT:    msr apsr_nzcvq, r0
+; V8M-NEXT:    blxns r0
+; V8M-NEXT:    vlldm sp
+; V8M-NEXT:    add sp, #136
+; V8M-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-NEXT:    movw r1, :lower16:arr
+; V8M-NEXT:    sxtb r0, r0
+; V8M-NEXT:    movt r1, :upper16:arr
+; V8M-NEXT:    ldr.w r0, [r1, r0, lsl #2]
+; V8M-NEXT:    pop {r7, pc}
+;
+; V81M-LABEL: access_i8:
+; V81M:       @ %bb.0: @ %entry
+; V81M-NEXT:    push {r7, lr}
+; V81M-NEXT:    movw r0, :lower16:get_idx
+; V81M-NEXT:    movt r0, :upper16:get_idx
+; V81M-NEXT:    ldr r0, [r0]
+; V81M-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-NEXT:    bic r0, r0, #1
+; V81M-NEXT:    sub sp, #136
+; V81M-NEXT:    vlstm sp
+; V81M-NEXT:    clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
+; V81M-NEXT:    blxns r0
+; V81M-NEXT:    vlldm sp
+; V81M-NEXT:    add sp, #136
+; V81M-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-NEXT:    movw r1, :lower16:arr
+; V81M-NEXT:    sxtb r0, r0
+; V81M-NEXT:    movt r1, :upper16:arr
+; V81M-NEXT:    ldr.w r0, [r1, r0, lsl #2]
+; V81M-NEXT:    pop {r7, pc}
+entry:
+  %0 = load ptr, ptr @get_idx, align 4
+  %call = tail call signext i8 %0() "cmse_nonsecure_call"
+  %idxprom = sext i8 %call to i32
+  %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
+  %1 = load i32, ptr %arrayidx, align 4
+  ret i32 %1
+}
+
+define i32 @access_u8() {
+; V8M-LABEL: access_u8:
+; V8M:       @ %bb.0: @ %entry
+; V8M-NEXT:    push {r7, lr}
+; V8M-NEXT:    movw r0, :lower16:get_idx
+; V8M-NEXT:    movt r0, :upper16:get_idx
+; V8M-NEXT:    ldr r0, [r0]
+; V8M-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-NEXT:    bic r0, r0, #1
+; V8M-NEXT:    sub sp, #136
+; V8M-NEXT:    vlstm sp
+; V8M-NEXT:    mov r1, r0
+; V8M-NEXT:    mov r2, r0
+; V8M-NEXT:    mov r3, r0
+; V8M-NEXT:    mov r4, r0
+; V8M-NEXT:    mov r5, r0
+; V8M-NEXT:    mov r6, r0
+; V8M-NEXT:    mov r7, r0
+; V8M-NEXT:    mov r8, r0
+; V8M-NEXT:    mov r9, r0
+; V8M-NEXT:    mov r10, r0
+; V8M-NEXT:    mov r11, r0
+; V8M-NEXT:    mov r12, r0
+; V8M-NEXT:    msr apsr_nzcvq, r0
+; V8M-NEXT:    blxns r0
+; V8M-NEXT:    vlldm sp
+; V8M-NEXT:    add sp, #136
+; V8M-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-NEXT:    movw r1, :lower16:arr
+; V8M-NEXT:    uxtb r0, r0
+; V8M-NEXT:    movt r1, :upper16:arr
+; V8M-NEXT:    ldr.w r0, [r1, r0, lsl #2]
+; V8M-NEXT:    pop {r7, pc}
+;
+; V81M-LABEL: access_u8:
+; V81M:       @ %bb.0: @ %entry
+; V81M-NEXT:    push {r7, lr}
+; V81M-NEXT:    movw r0, :lower16:get_idx
+; V81M-NEXT:    movt r0, :upper16:get_idx
+; V81M-NEXT:    ldr r0, [r0]
+; V81M-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-NEXT:    bic r0, r0, #1
+; V81M-NEXT:    sub sp, #136
+; V81M-NEXT:    vlstm sp
+; V81M-NEXT:    clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
+; V81M-NEXT:    blxns r0
+; V81M-NEXT:    vlldm sp
+; V81M-NEXT:    add sp, #136
+; V81M-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-NEXT:    movw r1, :lower16:arr
+; V81M-NEXT:    uxtb r0, r0
+; V81M-NEXT:    movt r1, :upper16:arr
+; V81M-NEXT:    ldr.w r0, [r1, r0, lsl #2]
+; V81M-NEXT:    pop {r7, pc}
+entry:
+  %0 = load ptr, ptr @get_idx, align 4
+  %call = tail call zeroext i8 %0() "cmse_nonsecure_call"
+  %idxprom = zext i8 %call to i32
+  %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
+  %1 = load i32, ptr %arrayidx, align 4
+  ret i32 %1
+}
+
+define i32 @access_i1() {
+; V8M-LABEL: access_i1:
+; V8M:       @ %bb.0: @ %entry
+; V8M-NEXT:    push {r7, lr}
+; V8M-NEXT:    movw r0, :lower16:get_idx
+; V8M-NEXT:    movt r0, :upper16:get_idx
+; V8M-NEXT:    ldr r0, [r0]
+; V8M-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-NEXT:    bic r0, r0, #1
+; V8M-NEXT:    sub sp, #136
+; V8M-NEXT:    vlstm sp
+; V8M-NEXT:    mov r1, r0
+; V8M-NEXT:    mov r2, r0
+; V8M-NEXT:    mov r3, r0
+; V8M-NEXT:    mov r4, r0
+; V8M-NEXT:    mov r5, r0
+; V8M-NEXT:    mov r6, r0
+; V8M-NEXT:    mov r7, r0
+; V8M-NEXT:    mov r8, r0
+; V8M-NEXT:    mov r9, r0
+; V8M-NEXT:    mov r10, r0
+; V8M-NEXT:    mov r11, r0
+; V8M-NEXT:    mov r12, r0
+; V8M-NEXT:    msr apsr_nzcvq, r0
+; V8M-NEXT:    blxns r0
+; V8M-NEXT:    vlldm sp
+; V8M-NEXT:    add sp, #136
+; V8M-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-NEXT:    movw r1, :lower16:arr
+; V8M-NEXT:    and r0, r0, #1
+; V8M-NEXT:    movt r1, :upper16:arr
+; V8M-NEXT:    ldr.w r0, [r1, r0, lsl #2]
+; V8M-NEXT:    pop {r7, pc}
+;
+; V81M-LABEL: access_i1:
+; V81M:       @ %bb.0: @ %entry
+; V81M-NEXT:    push {r7, lr}
+; V81M-NEXT:    movw r0, :lower16:get_idx
+; V81M-NEXT:    movt r0, :upper16:get_idx
+; V81M-NEXT:    ldr r0, [r0]
+; V81M-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-NEXT:    bic r0, r0, #1
+; V81M-NEXT:    sub sp, #136
+; V81M-NEXT:    vlstm sp
+; V81M-NEXT:    clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
+; V81M-NEXT:    blxns r0
+; V81M-NEXT:    vlldm sp
+; V81M-NEXT:    add sp, #136
+; V81M-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-NEXT:    movw r1, :lower16:arr
+; V81M-NEXT:    and r0, r0, #1
+; V81M-NEXT:    movt r1, :upper16:arr
+; V81M-NEXT:    ldr.w r0, [r1, r0, lsl #2]
+; V81M-NEXT:    pop {r7, pc}
+entry:
+  %0 = load ptr, ptr @get_idx, align 4
+  %call = tail call zeroext i1 %0() "cmse_nonsecure_call"
+  %idxprom = zext i1 %call to i32
+  %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
+  %1 = load i32, ptr %arrayidx, align 4
+  ret i32 %1
+}
+
+define i32 @access_i5() {
+; V8M-LABEL: access_i5:
+; V8M:       @ %bb.0: @ %entry
+; V8M-NEXT:    push {r7, lr}
+; V8M-NEXT:    movw r0, :lower16:get_idx
+; V8M-NEXT:    movt r0, :upper16:get_idx
+; V8M-NEXT:    ldr r0, [r0]
+; V8M-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-NEXT:    bic r0, r0, #1
+; V8M-NEXT:    sub sp, #136
+; V8M-NEXT:    vlstm sp
+; V8M-NEXT:    mov r1, r0
+; V8M-NEXT:    mov r2, r0
+; V8M-NEXT:    mov r3, r0
+; V8M-NEXT:    mov r4, r0
+; V8M-NEXT:    mov r5, r0
+; V8M-NEXT:    mov r6, r0
+; V8M-NEXT:    mov r7, r0
+; V8M-NEXT:    mov r8, r0
+; V8M-NEXT:    mov r9, r0
+; V8M-NEXT:    mov r10, r0
+; V8M-NEXT:    mov r11, r0
+; V8M-NEXT:    mov r12, r0
+; V8M-NEXT:    msr apsr_nzcvq, r0
+; V8M-NEXT:    blxns r0
+; V8M-NEXT:    vlldm sp
+; V8M-NEXT:    add sp, #136
+; V8M-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-NEXT:    movw r1, :lower16:arr
+; V8M-NEXT:    sbfx r0, r0, #0, #5
+; V8M-NEXT:    movt r1, :upper16:arr
+; V8M-NEXT:    ldr.w r0, [r1, r0, lsl #2]
+; V8M-NEXT:    pop {r7, pc}
+;
+; V81M-LABEL: access_i5:
+; V81M:       @ %bb.0: @ %entry
+; V81M-NEXT:    push {r7, lr}
+; V81M-NEXT:    movw r0, :lower16:get_idx
+; V81M-NEXT:    movt r0, :upper16:get_idx
+; V81M-NEXT:    ldr r0, [r0]
+; V81M-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-NEXT:    bic r0, r0, #1
+; V81M-NEXT:    sub sp, #136
+; V81M-NEXT:    vlstm sp
+; V81M-NEXT:    clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
+; V81M-NEXT:    blxns r0
+; V81M-NEXT:    vlldm sp
+; V81M-NEXT:    add sp, #136
+; V81M-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-NEXT:    movw r1, :lower16:arr
+; V81M-NEXT:    sbfx r0, r0, #0, #5
+; V81M-NEXT:    movt r1, :upper16:arr
+; V81M-NEXT:    ldr.w r0, [r1, r0, lsl #2]
+; V81M-NEXT:    pop {r7, pc}
+entry:
+  %0 = load ptr, ptr @get_idx, align 4
+  %call = tail call signext i5 %0() "cmse_nonsecure_call"
+  %idxprom = sext i5 %call to i32
+  %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom
+  %1 = load i32, ptr %arrayidx, align 4
+  ret i32 %1
+}
+
+define i32 @access_u5() {
+; V8M-LABEL: access_u5:
+; V8M:       @ %bb.0: @ %entry
+; V8M-NEXT:    push {r7, lr}
+; V8M-NEXT:    movw r0, :lower16:get_idx
+; V8M-NEXT:    movt r0, :upper16:get_idx
+; V8M-NEXT:    ldr r0, [r0]
+; V8M-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-NEXT:    bic r0, r0, #1
+; V8M-NEXT:    sub sp, #136
+; V8M-NEXT:    vlstm sp
+; V8M-NEXT:    mov r1, r0
+; V8M-NEXT:    mov r2, r0
+; V8M-NEXT:    mov r3, r0
+; V8M-NEXT:    mov r4, r0
+; V8M-NEXT:    mov r5, r0
+; V8M-NEXT:    mov r6, r0
+; V8M-NEXT:    mov r7, r0
+; V8M-NEXT:    mov r8, r0
+; V8M-NEXT:    mov r9, r0
+; V8M-NEXT:    mov r10, r0
+; V8M-NEXT:    mov r11, r0
+; V8M-NEXT:    mov r12, r0
+; V8M-NEXT:    msr apsr_nzcvq, r0
+; V8M-NEXT:    blxns r0
+; V8M-NEXT:    vlldm sp
+; V8M-NEXT:    add sp, #136
+; V8M-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V8M-NEXT:    movw r1, :lower16:arr
+; V8M-NEXT:    and r0, r0, #31
+; V8M-NEXT:    movt r1, :upper16:arr
+; V8M-NEXT:    ldr.w r0, [r1, r0, lsl #2]
+; V8M-NEXT:    pop {r7, pc}
+;
+; V81M-LABEL: access_u5:
+; V81M:       @ %bb.0: @ %entry
+; V81M-NEXT:    push {r7, lr}
+; V81M-NEXT:    movw r0, :lower16:get_idx
+; V81M-NEXT:    movt r0, :upper16:get_idx
+; V81M-NEXT:    ldr r0, [r0]
+; V81M-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-NEXT:    bic r0, r0, #1
+; V81M-NEXT:    sub sp, #136
+; V81M-NEXT:    vlstm sp
+; V81M-NEXT:    clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr}
+; V81M-NEXT:    blxns r0
+; V81M-NEXT:    vlldm sp
+; V81M-NEXT:    add sp, #136
+; V81M-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11}
+; V81M-NEXT:    movw r1, :lower16:arr
+; V81M-NEXT:    and r0, r0, #31
+; V81M-NEXT:    movt r1, :upper16:arr
+; V81M-NEXT:    ldr.w r0, [r1, r0, lsl #2]
+; V81M-NEXT:    pop {r7, pc}
+entry:
+  %0 = load ptr, ptr @get_idx, align 4
+  %call = tail call zeroext i5 %0() "cmse_nonsecure_call"
+  %idxprom = zext i5 %call to i32
+  %array...
[truncated]

``````````
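
The second test file, `llvm/test/CodeGen/ARM/cmse-harden-entry-arguments.ll`, falls outside the truncated diff above; it exercises the argument-side counterpart handled in `LowerFormalArguments`. In rough C terms (a sketch under my own naming, `access_u16_arg` is not from the patch; build with `-mcmse` for an ARMv8-M target):

```c
/* A CMSE entry function receives a narrow integer argument from
 * non-secure state.  The AAPCS requires the caller to extend it to 32
 * bits, but a non-secure caller cannot be trusted, so with this patch
 * the entry function sign- or zero-extends the incoming register value
 * itself (uxth in this case; sxth/uxtb/sxtb/and/sbfx for the other
 * widths the tests cover) before using it. */
int arr[256];

int __attribute__((cmse_nonsecure_entry)) access_u16_arg(unsigned short idx) {
  return arr[idx];   /* idx is re-extended on entry, not taken at face value */
}
```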



https://github.com/llvm/llvm-project/pull/89944

