[llvm] 6a857fe - [RISCV][GISel] Promote s32 G_CONSTANT on RV64.

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Mon Nov 11 23:35:12 PST 2024


Author: Craig Topper
Date: 2024-11-11T23:35:02-08:00
New Revision: 6a857fe8b960cb26cf03a0c825250726589a7771

URL: https://github.com/llvm/llvm-project/commit/6a857fe8b960cb26cf03a0c825250726589a7771
DIFF: https://github.com/llvm/llvm-project/commit/6a857fe8b960cb26cf03a0c825250726589a7771.diff

LOG: [RISCV][GISel] Promote s32 G_CONSTANT on RV64.
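
This makes s32 an illegal type for G_CONSTANT on RV64: s32 constants stay
legal only on RV32 via the predicated legalFor(!ST.is64Bit(), {s32}) rule,
and clampScalar(0, sXLen, sXLen) widens every narrower scalar constant to
s64. Constants are therefore materialized directly at the register width
rather than as an s32 constant plus a separate extension, which lets later
folds fire (for example, subi_i32 in alu-roundtrip.ll now compiles to a
single addi instead of a four-instruction sequence).

A minimal before/after sketch of the legalized MIR, adapted from the
legalize-const-rv64.mir changes in this patch:

    ; before this patch: s32 constant plus a separate extension
    %0:_(s32) = G_CONSTANT i32 -64769
    %1:_(s64) = G_ANYEXT %0(s32)
    $x10 = COPY %1(s64)

    ; after this patch: widened and materialized directly as s64
    %0:_(s64) = G_CONSTANT i64 -64769
    $x10 = COPY %0(s64)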

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
    llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll
    llvm/test/CodeGen/RISCV/GlobalISel/bitmanip.ll
    llvm/test/CodeGen/RISCV/GlobalISel/combine.ll
    llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/constant64.mir
    llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/sext-rv64.mir
    llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/shift-rv64.mir
    llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/zext-rv64.mir
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv64.mir
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-bitreverse-rv64.mir
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-bswap-rv64.mir
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-const-rv64.mir
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-constbarrier-rv64.mir
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-fshl-fshr-rv64.mir
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv64.mir
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rotate-rv64.mir
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sat-rv64.mir
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv64.mir
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-anyext.mir
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-extract-subvector.mir
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-icmp.mir
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-insert-subvector.mir
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sext.mir
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv64.mir
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-zext.mir
    llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll
    llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll
    llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 778f7bd4fb3bbe..b843fbbcfcb87f 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -230,10 +230,11 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
   }
 
   getActionDefinitionsBuilder(G_CONSTANT)
-      .legalFor({s32, p0})
+      .legalFor({p0})
+      .legalFor(!ST.is64Bit(), {s32})
       .customFor(ST.is64Bit(), {s64})
       .widenScalarToNextPow2(0)
-      .clampScalar(0, s32, sXLen);
+      .clampScalar(0, sXLen, sXLen);
 
   // TODO: transform illegal vector types into legal vector type
   getActionDefinitionsBuilder(

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll b/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll
index 38cdd6a2cdcdc0..330f8b16065f13 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll
@@ -123,10 +123,7 @@ define i32 @subi_i32(i32 %a) {
 ;
 ; RV64IM-LABEL: subi_i32:
 ; RV64IM:       # %bb.0: # %entry
-; RV64IM-NEXT:    li a1, 1
-; RV64IM-NEXT:    slli a1, a1, 32
-; RV64IM-NEXT:    addi a1, a1, -1234
-; RV64IM-NEXT:    add a0, a0, a1
+; RV64IM-NEXT:    addi a0, a0, -1234
 ; RV64IM-NEXT:    ret
 entry:
   %0 = sub i32 %a, 1234

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/bitmanip.ll b/llvm/test/CodeGen/RISCV/GlobalISel/bitmanip.ll
index 10bbc82b3d40b4..f33ba1d7a302ef 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/bitmanip.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/bitmanip.ll
@@ -115,18 +115,17 @@ define i7 @bitreverse_i7(i7 %x) {
 ; RV64-NEXT:    slli a2, a0, 4
 ; RV64-NEXT:    andi a2, a2, 32
 ; RV64-NEXT:    or a1, a1, a2
-; RV64-NEXT:    li a2, 2
-; RV64-NEXT:    slli a3, a0, 2
-; RV64-NEXT:    andi a3, a3, 16
+; RV64-NEXT:    slli a2, a0, 2
+; RV64-NEXT:    andi a2, a2, 16
 ; RV64-NEXT:    andi a0, a0, 127
-; RV64-NEXT:    andi a4, a0, 8
-; RV64-NEXT:    or a3, a3, a4
-; RV64-NEXT:    or a1, a1, a3
-; RV64-NEXT:    srli a3, a0, 2
-; RV64-NEXT:    andi a3, a3, 4
-; RV64-NEXT:    srli a4, a0, 4
-; RV64-NEXT:    and a2, a4, a2
-; RV64-NEXT:    or a2, a3, a2
+; RV64-NEXT:    andi a3, a0, 8
+; RV64-NEXT:    or a2, a2, a3
+; RV64-NEXT:    or a1, a1, a2
+; RV64-NEXT:    srli a2, a0, 2
+; RV64-NEXT:    andi a2, a2, 4
+; RV64-NEXT:    srli a3, a0, 4
+; RV64-NEXT:    andi a3, a3, 2
+; RV64-NEXT:    or a2, a2, a3
 ; RV64-NEXT:    or a1, a1, a2
 ; RV64-NEXT:    srli a0, a0, 6
 ; RV64-NEXT:    or a0, a1, a0
@@ -172,39 +171,36 @@ define i24 @bitreverse_i24(i24 %x) {
 ;
 ; RV64-LABEL: bitreverse_i24:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    lui a1, 4096
-; RV64-NEXT:    addiw a1, a1, -1
-; RV64-NEXT:    slli a2, a0, 16
-; RV64-NEXT:    and a0, a0, a1
+; RV64-NEXT:    slli a1, a0, 16
+; RV64-NEXT:    lui a2, 4096
+; RV64-NEXT:    addiw a2, a2, -1
+; RV64-NEXT:    and a0, a0, a2
 ; RV64-NEXT:    srli a0, a0, 16
-; RV64-NEXT:    or a0, a0, a2
-; RV64-NEXT:    lui a2, 65521
-; RV64-NEXT:    addiw a2, a2, -241
-; RV64-NEXT:    slli a2, a2, 4
-; RV64-NEXT:    and a3, a2, a1
+; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    lui a1, 1048335
+; RV64-NEXT:    addiw a1, a1, 240
+; RV64-NEXT:    and a3, a1, a2
 ; RV64-NEXT:    and a3, a0, a3
 ; RV64-NEXT:    srli a3, a3, 4
 ; RV64-NEXT:    slli a0, a0, 4
-; RV64-NEXT:    and a0, a0, a2
+; RV64-NEXT:    and a0, a0, a1
 ; RV64-NEXT:    or a0, a3, a0
-; RV64-NEXT:    lui a2, 261939
-; RV64-NEXT:    addiw a2, a2, 819
-; RV64-NEXT:    slli a2, a2, 2
-; RV64-NEXT:    and a3, a2, a1
+; RV64-NEXT:    lui a1, 1047757
+; RV64-NEXT:    addiw a1, a1, -820
+; RV64-NEXT:    and a3, a1, a2
 ; RV64-NEXT:    and a3, a0, a3
 ; RV64-NEXT:    srli a3, a3, 2
 ; RV64-NEXT:    slli a0, a0, 2
-; RV64-NEXT:    and a0, a0, a2
+; RV64-NEXT:    and a0, a0, a1
 ; RV64-NEXT:    or a0, a3, a0
-; RV64-NEXT:    lui a2, 523605
-; RV64-NEXT:    addiw a2, a2, 1365
-; RV64-NEXT:    slli a2, a2, 1
-; RV64-NEXT:    and a1, a2, a1
-; RV64-NEXT:    and a1, a0, a1
-; RV64-NEXT:    srli a1, a1, 1
+; RV64-NEXT:    lui a1, 1047211
+; RV64-NEXT:    addiw a1, a1, -1366
+; RV64-NEXT:    and a2, a1, a2
+; RV64-NEXT:    and a2, a0, a2
+; RV64-NEXT:    srli a2, a2, 1
 ; RV64-NEXT:    slli a0, a0, 1
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    and a0, a0, a1
+; RV64-NEXT:    or a0, a2, a0
 ; RV64-NEXT:    ret
   %rev = call i24 @llvm.bitreverse.i24(i24 %x)
   ret i24 %rev

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/combine.ll b/llvm/test/CodeGen/RISCV/GlobalISel/combine.ll
index 93d14c288db2eb..9c7fd6895d377a 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/combine.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/combine.ll
@@ -42,18 +42,10 @@ define i32 @mul_to_shift(i32 %x) {
 ; RV32-NEXT:    slli a0, a0, 2
 ; RV32-NEXT:    ret
 ;
-; RV64-O0-LABEL: mul_to_shift:
-; RV64-O0:       # %bb.0:
-; RV64-O0-NEXT:    li a1, 2
-; RV64-O0-NEXT:    sll a0, a0, a1
-; RV64-O0-NEXT:    ret
-;
-; RV64-OPT-LABEL: mul_to_shift:
-; RV64-OPT:       # %bb.0:
-; RV64-OPT-NEXT:    slli a0, a0, 2
-; RV64-OPT-NEXT:    ret
+; RV64-LABEL: mul_to_shift:
+; RV64:       # %bb.0:
+; RV64-NEXT:    slli a0, a0, 2
+; RV64-NEXT:    ret
   %a = mul i32 %x, 4
   ret i32 %a
 }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; RV64: {{.*}}

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/constant64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/constant64.mir
index 724ecc781ab5a0..646152e2e4ed49 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/constant64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/constant64.mir
@@ -141,9 +141,8 @@ body:            |
     ; CHECK-NEXT: [[LUI:%[0-9]+]]:gpr = LUI 524288
     ; CHECK-NEXT: $x10 = COPY [[LUI]]
     ; CHECK-NEXT: PseudoRET implicit $x10
-    %0:gprb(s32) = G_CONSTANT i32 -2147483648
-    %1:gprb(s64) = G_ANYEXT %0(s32)
-    $x10 = COPY %1(s64)
+    %0:gprb(s64) = G_CONSTANT i64 -2147483648
+    $x10 = COPY %0(s64)
     PseudoRET implicit $x10
 
 ...
@@ -163,9 +162,8 @@ body:            |
     ; CHECK-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[LUI]], 648
     ; CHECK-NEXT: $x10 = COPY [[ADDIW]]
     ; CHECK-NEXT: PseudoRET implicit $x10
-    %0:gprb(s32) = G_CONSTANT i32 -2147483000
-    %1:gprb(s64) = G_ANYEXT %0(s32)
-    $x10 = COPY %1(s64)
+    %0:gprb(s64) = G_CONSTANT i64 -2147483000
+    $x10 = COPY %0(s64)
     PseudoRET implicit $x10
 
 ...
@@ -185,9 +183,8 @@ body:            |
     ; CHECK-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[LUI]], -1
     ; CHECK-NEXT: $x10 = COPY [[ADDIW]]
     ; CHECK-NEXT: PseudoRET implicit $x10
-    %0:gprb(s32) = G_CONSTANT i32 2147483647
-    %1:gprb(s64) = G_ANYEXT %0(s32)
-    $x10 = COPY %1(s64)
+    %0:gprb(s64) = G_CONSTANT i64 2147483647
+    $x10 = COPY %0(s64)
     PseudoRET implicit $x10
 
 ...
@@ -207,51 +204,8 @@ body:            |
     ; CHECK-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[LUI]], -648
     ; CHECK-NEXT: $x10 = COPY [[ADDIW]]
     ; CHECK-NEXT: PseudoRET implicit $x10
-    %0:gprb(s32) = G_CONSTANT i32 2147483000
-    %1:gprb(s64) = G_ANYEXT %0(s32)
-    $x10 = COPY %1(s64)
-    PseudoRET implicit $x10
-
-...
----
-name:            const_i32_256
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:            |
-  bb.0:
-    liveins: $x10
-
-    ; CHECK-LABEL: name: const_i32_256
-    ; CHECK: liveins: $x10
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 256
-    ; CHECK-NEXT: $x10 = COPY [[ADDI]]
-    ; CHECK-NEXT: PseudoRET implicit $x10
-    %0:gprb(s32) = G_CONSTANT i32 256
-    %1:gprb(s64) = G_ANYEXT %0(s32)
-    $x10 = COPY %1(s64)
-    PseudoRET implicit $x10
-
-...
----
-name:            const_i32_0
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:            |
-  bb.0:
-    liveins: $x10
-
-    ; CHECK-LABEL: name: const_i32_0
-    ; CHECK: liveins: $x10
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x0
-    ; CHECK-NEXT: $x10 = COPY [[COPY]]
-    ; CHECK-NEXT: PseudoRET implicit $x10
-    %0:gprb(s32) = G_CONSTANT i32 0
-    %1:gprb(s64) = G_ANYEXT %0(s32)
-    $x10 = COPY %1(s64)
+    %0:gprb(s64) = G_CONSTANT i64 2147483000
+    $x10 = COPY %0(s64)
     PseudoRET implicit $x10
 
 ...

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/sext-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/sext-rv64.mir
index 50b1257c4ffec7..36f33f4dee9e1b 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/sext-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/sext-rv64.mir
@@ -7,13 +7,18 @@ regBankSelected: true
 tracksRegLiveness: true
 body:            |
   bb.0:
+    liveins: $x10
+
     ; CHECK-LABEL: name: sext_32_64
-    ; CHECK: [[LUI:%[0-9]+]]:gpr = LUI 524288
-    ; CHECK-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[LUI]], 0
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[COPY]], 0
     ; CHECK-NEXT: $x8 = COPY [[ADDIW]]
-    %0:gprb(s32) = G_CONSTANT i32 -2147483648
-    %1:gprb(s64) = G_SEXT %0
-    $x8 = COPY %1(s64)
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0
+    %2:gprb(s64) = G_SEXT %1
+    $x8 = COPY %2(s64)
 ...
 ---
 name:            sext_inreg_64_32

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/shift-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/shift-rv64.mir
index b2d857a9ac6aeb..1a8d1558dcf55b 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/shift-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/shift-rv64.mir
@@ -26,31 +26,6 @@ body:             |
     PseudoRET implicit $x10
 ...
 
----
-name:            shl_zext
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $x10
-
-    ; CHECK-LABEL: name: shl_zext
-    ; CHECK: liveins: $x10
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
-    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 1
-    ; CHECK-NEXT: [[SLL:%[0-9]+]]:gpr = SLL [[COPY]], [[ADDI]]
-    ; CHECK-NEXT: $x10 = COPY [[SLL]]
-    ; CHECK-NEXT: PseudoRET implicit $x10
-    %0:gprb(s64) = COPY $x10
-    %1:gprb(s32) = G_CONSTANT i32 1
-    %2:gprb(s64) = G_ZEXT %1
-    %3:gprb(s64) = G_SHL %0, %2(s64)
-    $x10 = COPY %3(s64)
-    PseudoRET implicit $x10
-...
-
 ---
 name:            shl_and
 legalized:       true

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/zext-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/zext-rv64.mir
index 687be8a13932fe..12c2fd03b8fea1 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/zext-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/zext-rv64.mir
@@ -8,17 +8,24 @@ regBankSelected: true
 tracksRegLiveness: true
 body:            |
   bb.0:
+    liveins: $x10
+
     ; CHECK-LABEL: name: zext_32_64
-    ; CHECK: [[LUI:%[0-9]+]]:gpr = LUI 524288
-    ; CHECK-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[LUI]], 32
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[COPY]], 32
     ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[SLLI]], 32
     ; CHECK-NEXT: $x8 = COPY [[SRLI]]
     ;
     ; ZBA-LABEL: name: zext_32_64
-    ; ZBA: [[LUI:%[0-9]+]]:gpr = LUI 524288
-    ; ZBA-NEXT: [[ADD_UW:%[0-9]+]]:gpr = ADD_UW [[LUI]], $x0
+    ; ZBA: liveins: $x10
+    ; ZBA-NEXT: {{  $}}
+    ; ZBA-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; ZBA-NEXT: [[ADD_UW:%[0-9]+]]:gpr = ADD_UW [[COPY]], $x0
     ; ZBA-NEXT: $x8 = COPY [[ADD_UW]]
-    %0:gprb(s32) = G_CONSTANT i32 -2147483648
-    %1:gprb(s64) = G_ZEXT %0
-    $x8 = COPY %1(s64)
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0
+    %2:gprb(s64) = G_ZEXT %1
+    $x8 = COPY %2(s64)
 ...

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv64.mir
index f4a88377d4a31a..22ce8a0fd0dfa3 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv64.mir
@@ -11,18 +11,16 @@ body:             |
     ; RV64I-LABEL: name: abs_i8
     ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; RV64I-NEXT: [[ASSERT_ZEXT:%[0-9]+]]:_(s64) = G_ASSERT_ZEXT [[COPY]], 8
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
-    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
-    ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
-    ; RV64I-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ASSERT_ZEXT]], [[C2]](s64)
-    ; RV64I-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C2]](s64)
-    ; RV64I-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[ASHR]], [[AND]](s64)
+    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 7
+    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+    ; RV64I-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ASSERT_ZEXT]], [[C1]](s64)
+    ; RV64I-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C1]](s64)
+    ; RV64I-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[ASHR]], [[C]](s64)
     ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASSERT_ZEXT]], [[ASHR1]]
     ; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[ADD]], [[ASHR1]]
-    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[C1]]
-    ; RV64I-NEXT: $x10 = COPY [[AND1]](s64)
+    ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
+    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[C2]]
+    ; RV64I-NEXT: $x10 = COPY [[AND]](s64)
     ; RV64I-NEXT: PseudoRET implicit $x10
     ;
     ; RV64ZBB-LABEL: name: abs_i8
@@ -51,16 +49,13 @@ body:             |
     ; RV64I-LABEL: name: abs_i16
     ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; RV64I-NEXT: [[ASSERT_SEXT:%[0-9]+]]:_(s64) = G_ASSERT_SEXT [[COPY]], 16
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535
-    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
-    ; RV64I-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[ASSERT_SEXT]], [[AND]](s64)
+    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
+    ; RV64I-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[ASSERT_SEXT]], [[C]](s64)
     ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASSERT_SEXT]], [[ASHR]]
     ; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[ADD]], [[ASHR]]
-    ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
-    ; RV64I-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[XOR]], [[C2]](s64)
-    ; RV64I-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C2]](s64)
+    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+    ; RV64I-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[XOR]], [[C1]](s64)
+    ; RV64I-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C1]](s64)
     ; RV64I-NEXT: $x10 = COPY [[ASHR1]](s64)
     ; RV64I-NEXT: PseudoRET implicit $x10
     ;
@@ -88,9 +83,8 @@ body:             |
     ; RV64I-LABEL: name: abs_i32
     ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; RV64I-NEXT: [[ASSERT_SEXT:%[0-9]+]]:_(s64) = G_ASSERT_SEXT [[COPY]], 32
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
-    ; RV64I-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[ASSERT_SEXT]], [[ZEXT]](s64)
+    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
+    ; RV64I-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[ASSERT_SEXT]], [[C]](s64)
     ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASSERT_SEXT]], [[ASHR]]
     ; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[ADD]], [[ASHR]]
     ; RV64I-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[XOR]], 32

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-bitreverse-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-bitreverse-rv64.mir
index d7063dfd9a0f03..31c9b2c0dbccf7 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-bitreverse-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-bitreverse-rv64.mir
@@ -11,47 +11,36 @@ body:             |
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C]](s64)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
-    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND]](s64)
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[AND]](s64)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C]](s64)
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -16
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
-    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[OR]], [[ANYEXT1]]
-    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[ANYEXT2]], [[C1]]
-    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[AND2]], [[C1]]
-    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND4]], [[AND3]](s64)
-    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[OR]], [[AND3]](s64)
-    ; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SHL1]], [[ANYEXT1]]
-    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[AND5]]
-    ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
-    ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 -52
-    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
-    ; CHECK-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[OR1]], [[ANYEXT3]]
-    ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
-    ; CHECK-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[ANYEXT4]], [[C1]]
-    ; CHECK-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[AND6]], [[C1]]
-    ; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND8]], [[AND7]](s64)
-    ; CHECK-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[OR1]], [[AND7]](s64)
-    ; CHECK-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[SHL2]], [[ANYEXT3]]
-    ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[LSHR2]], [[AND9]]
-    ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 -86
-    ; CHECK-NEXT: [[ANYEXT5:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
-    ; CHECK-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[OR2]], [[ANYEXT5]]
-    ; CHECK-NEXT: [[ANYEXT6:%[0-9]+]]:_(s64) = G_ANYEXT [[C6]](s32)
-    ; CHECK-NEXT: [[AND11:%[0-9]+]]:_(s64) = G_AND [[ANYEXT6]], [[C1]]
-    ; CHECK-NEXT: [[AND12:%[0-9]+]]:_(s64) = G_AND [[AND10]], [[C1]]
-    ; CHECK-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND12]], [[AND11]](s64)
-    ; CHECK-NEXT: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[OR2]], [[AND11]](s64)
-    ; CHECK-NEXT: [[AND13:%[0-9]+]]:_(s64) = G_AND [[SHL3]], [[ANYEXT5]]
-    ; CHECK-NEXT: [[OR3:%[0-9]+]]:_(s64) = G_OR [[LSHR3]], [[AND13]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 -16
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[OR]], [[C3]]
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[AND1]], [[C1]]
+    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[C2]](s64)
+    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[OR]], [[C2]](s64)
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SHL1]], [[C3]]
+    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[AND3]]
+    ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 -52
+    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[OR1]], [[C5]]
+    ; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[AND4]], [[C1]]
+    ; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND5]], [[C4]](s64)
+    ; CHECK-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[OR1]], [[C4]](s64)
+    ; CHECK-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[SHL2]], [[C5]]
+    ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[LSHR2]], [[AND6]]
+    ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 -86
+    ; CHECK-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[OR2]], [[C7]]
+    ; CHECK-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[AND7]], [[C1]]
+    ; CHECK-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND8]], [[C6]](s64)
+    ; CHECK-NEXT: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[OR2]], [[C6]](s64)
+    ; CHECK-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[SHL3]], [[C7]]
+    ; CHECK-NEXT: [[OR3:%[0-9]+]]:_(s64) = G_OR [[LSHR3]], [[AND9]]
     ; CHECK-NEXT: $x10 = COPY [[OR3]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:_(s64) = COPY $x10
@@ -72,47 +61,36 @@ body:             |
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C]](s64)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
-    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND]](s64)
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[AND]](s64)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C]](s64)
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -3856
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
-    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[OR]], [[ANYEXT1]]
-    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[ANYEXT2]], [[C1]]
-    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[AND2]], [[C1]]
-    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND4]], [[AND3]](s64)
-    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[OR]], [[AND3]](s64)
-    ; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SHL1]], [[ANYEXT1]]
-    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[AND5]]
-    ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
-    ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 -13108
-    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
-    ; CHECK-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[OR1]], [[ANYEXT3]]
-    ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
-    ; CHECK-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[ANYEXT4]], [[C1]]
-    ; CHECK-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[AND6]], [[C1]]
-    ; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND8]], [[AND7]](s64)
-    ; CHECK-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[OR1]], [[AND7]](s64)
-    ; CHECK-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[SHL2]], [[ANYEXT3]]
-    ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[LSHR2]], [[AND9]]
-    ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 -21846
-    ; CHECK-NEXT: [[ANYEXT5:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
-    ; CHECK-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[OR2]], [[ANYEXT5]]
-    ; CHECK-NEXT: [[ANYEXT6:%[0-9]+]]:_(s64) = G_ANYEXT [[C6]](s32)
-    ; CHECK-NEXT: [[AND11:%[0-9]+]]:_(s64) = G_AND [[ANYEXT6]], [[C1]]
-    ; CHECK-NEXT: [[AND12:%[0-9]+]]:_(s64) = G_AND [[AND10]], [[C1]]
-    ; CHECK-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND12]], [[AND11]](s64)
-    ; CHECK-NEXT: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[OR2]], [[AND11]](s64)
-    ; CHECK-NEXT: [[AND13:%[0-9]+]]:_(s64) = G_AND [[SHL3]], [[ANYEXT5]]
-    ; CHECK-NEXT: [[OR3:%[0-9]+]]:_(s64) = G_OR [[LSHR3]], [[AND13]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 -3856
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[OR]], [[C3]]
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[AND1]], [[C1]]
+    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[C2]](s64)
+    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[OR]], [[C2]](s64)
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SHL1]], [[C3]]
+    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[AND3]]
+    ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 -13108
+    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[OR1]], [[C5]]
+    ; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[AND4]], [[C1]]
+    ; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND5]], [[C4]](s64)
+    ; CHECK-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[OR1]], [[C4]](s64)
+    ; CHECK-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[SHL2]], [[C5]]
+    ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[LSHR2]], [[AND6]]
+    ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 -21846
+    ; CHECK-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[OR2]], [[C7]]
+    ; CHECK-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[AND7]], [[C1]]
+    ; CHECK-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND8]], [[C6]](s64)
+    ; CHECK-NEXT: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[OR2]], [[C6]](s64)
+    ; CHECK-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[SHL3]], [[C7]]
+    ; CHECK-NEXT: [[OR3:%[0-9]+]]:_(s64) = G_OR [[LSHR3]], [[AND9]]
     ; CHECK-NEXT: $x10 = COPY [[OR3]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:_(s64) = COPY $x10
@@ -133,52 +111,43 @@ body:             |
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
-    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
-    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[ZEXT]](s64)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C]](s64)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[ZEXT]](s64)
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C]](s64)
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65280
-    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[ANYEXT]]
-    ; CHECK-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[C3]](s32)
-    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[AND1]], [[ZEXT1]](s64)
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 65280
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C2]]
+    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[AND1]], [[C3]](s64)
     ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[OR]], [[SHL1]]
-    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[ZEXT1]](s64)
-    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[ANYEXT]]
+    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C3]](s64)
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[C2]]
     ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[OR1]], [[AND2]]
-    ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 -252645136
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
-    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[OR2]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 -252645136
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[OR2]], [[C5]]
     ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[AND3]], [[C1]]
-    ; CHECK-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[C4]](s32)
-    ; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND4]], [[ZEXT2]](s64)
-    ; CHECK-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[OR2]], [[ZEXT2]](s64)
-    ; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SHL2]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND4]], [[C4]](s64)
+    ; CHECK-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[OR2]], [[C4]](s64)
+    ; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SHL2]], [[C5]]
     ; CHECK-NEXT: [[OR3:%[0-9]+]]:_(s64) = G_OR [[LSHR2]], [[AND5]]
-    ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
-    ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 -858993460
-    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
-    ; CHECK-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[OR3]], [[ANYEXT2]]
+    ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 -858993460
+    ; CHECK-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[OR3]], [[C7]]
     ; CHECK-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[AND6]], [[C1]]
-    ; CHECK-NEXT: [[ZEXT3:%[0-9]+]]:_(s64) = G_ZEXT [[C6]](s32)
-    ; CHECK-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND7]], [[ZEXT3]](s64)
-    ; CHECK-NEXT: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[OR3]], [[ZEXT3]](s64)
-    ; CHECK-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[SHL3]], [[ANYEXT2]]
+    ; CHECK-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND7]], [[C6]](s64)
+    ; CHECK-NEXT: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[OR3]], [[C6]](s64)
+    ; CHECK-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[SHL3]], [[C7]]
     ; CHECK-NEXT: [[OR4:%[0-9]+]]:_(s64) = G_OR [[LSHR3]], [[AND8]]
-    ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1431655766
-    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C9]](s32)
-    ; CHECK-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[OR4]], [[ANYEXT3]]
+    ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1431655766
+    ; CHECK-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[OR4]], [[C9]]
     ; CHECK-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[AND9]], [[C1]]
-    ; CHECK-NEXT: [[ZEXT4:%[0-9]+]]:_(s64) = G_ZEXT [[C8]](s32)
-    ; CHECK-NEXT: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[AND10]], [[ZEXT4]](s64)
-    ; CHECK-NEXT: [[SHL4:%[0-9]+]]:_(s64) = G_SHL [[OR4]], [[ZEXT4]](s64)
-    ; CHECK-NEXT: [[AND11:%[0-9]+]]:_(s64) = G_AND [[SHL4]], [[ANYEXT3]]
+    ; CHECK-NEXT: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[AND10]], [[C8]](s64)
+    ; CHECK-NEXT: [[SHL4:%[0-9]+]]:_(s64) = G_SHL [[OR4]], [[C8]](s64)
+    ; CHECK-NEXT: [[AND11:%[0-9]+]]:_(s64) = G_AND [[SHL4]], [[C9]]
     ; CHECK-NEXT: [[OR5:%[0-9]+]]:_(s64) = G_OR [[LSHR4]], [[AND11]]
     ; CHECK-NEXT: $x10 = COPY [[OR5]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
@@ -270,19 +239,15 @@ body:             |
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
-    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND]](s64)
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[ANYEXT1]]
-    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[AND]](s64)
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[ANYEXT]](s64)
-    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[COPY1]]
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[AND1]], [[AND3]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C1]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C2]]
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[C]](s64)
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[C]]
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[AND]], [[AND2]]
     ; CHECK-NEXT: $x10 = COPY [[OR]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:_(s64) = COPY $x10
@@ -303,26 +268,20 @@ body:             |
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 7
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
-    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND]](s64)
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[ANYEXT1]]
-    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
-    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[ANYEXT2]], [[C1]]
-    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[AND2]](s64)
-    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[ANYEXT]]
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[AND1]], [[AND4]]
-    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[AND]](s64)
-    ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
-    ; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[ANYEXT3]]
-    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[OR]], [[AND5]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C1]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 7
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C3]]
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[C2]](s64)
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[C]]
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[AND]], [[AND2]]
+    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[C]](s64)
+    ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[C4]]
+    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[OR]], [[AND3]]
     ; CHECK-NEXT: $x10 = COPY [[OR1]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:_(s64) = COPY $x10
@@ -343,32 +302,24 @@ body:             |
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
-    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND]](s64)
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[ANYEXT1]]
-    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
-    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[ANYEXT2]], [[C1]]
-    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND2]](s64)
-    ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
-    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SHL1]], [[ANYEXT3]]
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[AND1]], [[AND3]]
-    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND4]], [[AND2]](s64)
-    ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
-    ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
-    ; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[ANYEXT4]]
-    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[OR]], [[AND5]]
-    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND4]], [[AND]](s64)
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[ANYEXT2]](s64)
-    ; CHECK-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[COPY1]]
-    ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[OR1]], [[AND6]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C1]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C2]](s64)
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[SHL1]], [[C3]]
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[AND]], [[AND1]]
+    ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C4]]
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[C2]](s64)
+    ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[C5]]
+    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[OR]], [[AND3]]
+    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[C]](s64)
+    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[C2]]
+    ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[OR1]], [[AND4]]
     ; CHECK-NEXT: $x10 = COPY [[OR2]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:_(s64) = COPY $x10
@@ -389,51 +340,37 @@ body:             |
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 127
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
-    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND]](s64)
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[ANYEXT1]]
-    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
-    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[ANYEXT2]], [[C1]]
-    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND2]](s64)
-    ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
-    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
-    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SHL1]], [[ANYEXT3]]
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[AND1]], [[AND3]]
-    ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
-    ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
-    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[ANYEXT4]], [[C1]]
-    ; CHECK-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND4]](s64)
-    ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; CHECK-NEXT: [[ANYEXT5:%[0-9]+]]:_(s64) = G_ANYEXT [[C6]](s32)
-    ; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SHL2]], [[ANYEXT5]]
-    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[OR]], [[AND5]]
-    ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT6:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
-    ; CHECK-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[ANYEXT6]], [[C1]]
-    ; CHECK-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND7]], [[AND6]](s64)
-    ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-    ; CHECK-NEXT: [[ANYEXT7:%[0-9]+]]:_(s64) = G_ANYEXT [[C8]](s32)
-    ; CHECK-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[ANYEXT7]]
-    ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[OR1]], [[AND8]]
-    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND7]], [[AND4]](s64)
-    ; CHECK-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[ANYEXT2]]
-    ; CHECK-NEXT: [[OR3:%[0-9]+]]:_(s64) = G_OR [[OR2]], [[AND9]]
-    ; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND7]], [[AND2]](s64)
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[ANYEXT4]](s64)
-    ; CHECK-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[LSHR2]], [[COPY1]]
-    ; CHECK-NEXT: [[OR4:%[0-9]+]]:_(s64) = G_OR [[OR3]], [[AND10]]
-    ; CHECK-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND7]], [[AND]](s64)
-    ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK-NEXT: [[ANYEXT8:%[0-9]+]]:_(s64) = G_ANYEXT [[C9]](s32)
-    ; CHECK-NEXT: [[AND11:%[0-9]+]]:_(s64) = G_AND [[LSHR3]], [[ANYEXT8]]
-    ; CHECK-NEXT: [[OR5:%[0-9]+]]:_(s64) = G_OR [[OR4]], [[AND11]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C1]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C2]](s64)
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[SHL1]], [[C3]]
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[AND]], [[AND1]]
+    ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C4]](s64)
+    ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[SHL2]], [[C5]]
+    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[OR]], [[AND2]]
+    ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 127
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C7]]
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C6]](s64)
+    ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[C8]]
+    ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[OR1]], [[AND4]]
+    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C4]](s64)
+    ; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[C2]]
+    ; CHECK-NEXT: [[OR3:%[0-9]+]]:_(s64) = G_OR [[OR2]], [[AND5]]
+    ; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C2]](s64)
+    ; CHECK-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[LSHR2]], [[C4]]
+    ; CHECK-NEXT: [[OR4:%[0-9]+]]:_(s64) = G_OR [[OR3]], [[AND6]]
+    ; CHECK-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C]](s64)
+    ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[LSHR3]], [[C9]]
+    ; CHECK-NEXT: [[OR5:%[0-9]+]]:_(s64) = G_OR [[OR4]], [[AND7]]
     ; CHECK-NEXT: $x10 = COPY [[OR5]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:_(s64) = COPY $x10
@@ -454,47 +391,36 @@ body:             |
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C]](s64)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16777215
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
-    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND]](s64)
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[AND]](s64)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C]](s64)
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -986896
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
-    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[OR]], [[ANYEXT1]]
-    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[ANYEXT2]], [[C1]]
-    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[AND2]], [[C1]]
-    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND4]], [[AND3]](s64)
-    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[OR]], [[AND3]](s64)
-    ; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SHL1]], [[ANYEXT1]]
-    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[AND5]]
-    ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
-    ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 -3355444
-    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
-    ; CHECK-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[OR1]], [[ANYEXT3]]
-    ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
-    ; CHECK-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[ANYEXT4]], [[C1]]
-    ; CHECK-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[AND6]], [[C1]]
-    ; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND8]], [[AND7]](s64)
-    ; CHECK-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[OR1]], [[AND7]](s64)
-    ; CHECK-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[SHL2]], [[ANYEXT3]]
-    ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[LSHR2]], [[AND9]]
-    ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 -5592406
-    ; CHECK-NEXT: [[ANYEXT5:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
-    ; CHECK-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[OR2]], [[ANYEXT5]]
-    ; CHECK-NEXT: [[ANYEXT6:%[0-9]+]]:_(s64) = G_ANYEXT [[C6]](s32)
-    ; CHECK-NEXT: [[AND11:%[0-9]+]]:_(s64) = G_AND [[ANYEXT6]], [[C1]]
-    ; CHECK-NEXT: [[AND12:%[0-9]+]]:_(s64) = G_AND [[AND10]], [[C1]]
-    ; CHECK-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND12]], [[AND11]](s64)
-    ; CHECK-NEXT: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[OR2]], [[AND11]](s64)
-    ; CHECK-NEXT: [[AND13:%[0-9]+]]:_(s64) = G_AND [[SHL3]], [[ANYEXT5]]
-    ; CHECK-NEXT: [[OR3:%[0-9]+]]:_(s64) = G_OR [[LSHR3]], [[AND13]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 -986896
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[OR]], [[C3]]
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[AND1]], [[C1]]
+    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[C2]](s64)
+    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[OR]], [[C2]](s64)
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SHL1]], [[C3]]
+    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[AND3]]
+    ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 -3355444
+    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[OR1]], [[C5]]
+    ; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[AND4]], [[C1]]
+    ; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND5]], [[C4]](s64)
+    ; CHECK-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[OR1]], [[C4]](s64)
+    ; CHECK-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[SHL2]], [[C5]]
+    ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[LSHR2]], [[AND6]]
+    ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 -5592406
+    ; CHECK-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[OR2]], [[C7]]
+    ; CHECK-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[AND7]], [[C1]]
+    ; CHECK-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND8]], [[C6]](s64)
+    ; CHECK-NEXT: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[OR2]], [[C6]](s64)
+    ; CHECK-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[SHL3]], [[C7]]
+    ; CHECK-NEXT: [[OR3:%[0-9]+]]:_(s64) = G_OR [[LSHR3]], [[AND9]]
     ; CHECK-NEXT: $x10 = COPY [[OR3]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
     %1:_(s64) = COPY $x10

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-bswap-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-bswap-rv64.mir
index a732f93cca6805..b5d28765889e1e 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-bswap-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-bswap-rv64.mir
@@ -16,15 +16,13 @@ body:             |
     ; RV64I-NEXT: {{  $}}
     ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; RV64I-NEXT: [[ASSERT_ZEXT:%[0-9]+]]:_(s64) = G_ASSERT_ZEXT [[COPY]], 16
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535
-    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
-    ; RV64I-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ASSERT_ZEXT]], [[AND]](s64)
-    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[ASSERT_ZEXT]], [[AND]](s64)
+    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; RV64I-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ASSERT_ZEXT]], [[C]](s64)
+    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[ASSERT_ZEXT]], [[C]](s64)
     ; RV64I-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
-    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[OR]], [[C1]]
-    ; RV64I-NEXT: $x10 = COPY [[AND1]](s64)
+    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535
+    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[OR]], [[C1]]
+    ; RV64I-NEXT: $x10 = COPY [[AND]](s64)
     ; RV64I-NEXT: PseudoRET implicit $x10
     ;
     ; RV64ZBB_OR_RV64ZBKB-LABEL: name: bswap_i16
@@ -55,20 +53,17 @@ body:             |
     ; RV64I-NEXT: {{  $}}
     ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; RV64I-NEXT: [[ASSERT_ZEXT:%[0-9]+]]:_(s64) = G_ASSERT_ZEXT [[COPY]], 32
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
-    ; RV64I-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ASSERT_ZEXT]], [[ZEXT]](s64)
-    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[ASSERT_ZEXT]], [[ZEXT]](s64)
+    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
+    ; RV64I-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ASSERT_ZEXT]], [[C]](s64)
+    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[ASSERT_ZEXT]], [[C]](s64)
     ; RV64I-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
-    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65280
-    ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ASSERT_ZEXT]], [[ANYEXT]]
-    ; RV64I-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[C2]](s32)
-    ; RV64I-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[AND]], [[ZEXT1]](s64)
+    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 65280
+    ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ASSERT_ZEXT]], [[C1]]
+    ; RV64I-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[AND]], [[C2]](s64)
     ; RV64I-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[OR]], [[SHL1]]
-    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[ASSERT_ZEXT]], [[ZEXT1]](s64)
-    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[ANYEXT]]
+    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[ASSERT_ZEXT]], [[C2]](s64)
+    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[C1]]
     ; RV64I-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[OR1]], [[AND1]]
     ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
     ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[OR2]], [[C3]]

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-const-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-const-rv64.mir
index 5e88d7726b9ef9..57fc513dc9e3ea 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-const-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-const-rv64.mir
@@ -6,9 +6,8 @@ name:            const_i8
 body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: const_i8
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -127
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -127
+    ; CHECK-NEXT: $x10 = COPY [[C]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
     %0:_(s8) = G_CONSTANT i8 129
     %1:_(s64) = G_ANYEXT %0(s8)
@@ -21,9 +20,8 @@ name:            const_i15
 body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: const_i15
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
+    ; CHECK-NEXT: $x10 = COPY [[C]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
     %0:_(s15) = G_CONSTANT i15 15
     %1:_(s64) = G_ANYEXT %0(s15)
@@ -36,9 +34,8 @@ name:            const_i16
 body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: const_i16
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 767
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 767
+    ; CHECK-NEXT: $x10 = COPY [[C]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
     %0:_(s16) = G_CONSTANT i16 -64769
     %1:_(s64) = G_ANYEXT %0(s16)
@@ -51,9 +48,8 @@ name:            const_i32
 body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: const_i32
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -64769
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -64769
+    ; CHECK-NEXT: $x10 = COPY [[C]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
     %0:_(s32) = G_CONSTANT i32 -64769
     %1:_(s64) = G_ANYEXT %0(s32)
@@ -146,10 +142,9 @@ name:            const_add_i32
 body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: const_add_i32
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -64769
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -64769
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[ANYEXT]]
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[C]]
     ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
     %0:_(s32) = G_CONSTANT i32 -64769
@@ -166,10 +161,9 @@ name:            const_add_i8
 body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: const_add_i8
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[ANYEXT]]
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[C]]
     ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
     %0:_(s8) = G_CONSTANT i8 1

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-constbarrier-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-constbarrier-rv64.mir
index 029205cc6bc224..dc5f062cb9eac4 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-constbarrier-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-constbarrier-rv64.mir
@@ -5,8 +5,9 @@ name:            constbarrier_i32
 body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: constbarrier_i32
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16368
-    ; CHECK-NEXT: [[CONSTANT_FOLD_BARRIER:%[0-9]+]]:_(s32) = G_CONSTANT_FOLD_BARRIER [[C]]
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16368
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[CONSTANT_FOLD_BARRIER:%[0-9]+]]:_(s32) = G_CONSTANT_FOLD_BARRIER [[TRUNC]]
     ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[CONSTANT_FOLD_BARRIER]](s32)
     ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
@@ -37,8 +38,9 @@ name:            constbarrier_i16
 body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: constbarrier_i16
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2048
-    ; CHECK-NEXT: [[CONSTANT_FOLD_BARRIER:%[0-9]+]]:_(s32) = G_CONSTANT_FOLD_BARRIER [[C]]
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2048
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
+    ; CHECK-NEXT: [[CONSTANT_FOLD_BARRIER:%[0-9]+]]:_(s32) = G_CONSTANT_FOLD_BARRIER [[TRUNC]]
     ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[CONSTANT_FOLD_BARRIER]](s32)
     ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
@@ -88,9 +90,8 @@ name:            constbarrier_nxv2i32
 body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: constbarrier_nxv2i32
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s64)
     ; CHECK-NEXT: [[CONSTANT_FOLD_BARRIER:%[0-9]+]]:_(<vscale x 2 x s32>) = G_CONSTANT_FOLD_BARRIER [[SPLAT_VECTOR]]
     ; CHECK-NEXT: $v8 = COPY [[CONSTANT_FOLD_BARRIER]](<vscale x 2 x s32>)
     ; CHECK-NEXT: PseudoRET implicit $v8

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir
index f3f72b7b5668b4..6cc5477b85a4ed 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir
@@ -14,54 +14,39 @@ body:             |
     ; RV64I: liveins: $x10
     ; RV64I-NEXT: {{  $}}
     ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
-    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
-    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[AND]](s64)
+    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
+    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C]](s64)
     ; RV64I-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], [[LSHR]]
-    ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
-    ; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[ANYEXT1]], [[C1]]
-    ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[OR]], [[C1]]
-    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[AND2]](s64)
+    ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[OR]], [[C1]]
+    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[C2]](s64)
     ; RV64I-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[OR]], [[LSHR1]]
-    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; RV64I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
-    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[ANYEXT2]], [[C1]]
-    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[OR1]], [[C1]]
-    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND5]], [[AND4]](s64)
+    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[OR1]], [[C1]]
+    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[C3]](s64)
     ; RV64I-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[OR1]], [[LSHR2]]
-    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[OR2]], [[C1]]
-    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND6]], [[AND]](s64)
-    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 85
-    ; RV64I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
-    ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[LSHR3]], [[ANYEXT3]]
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[OR2]], [[AND7]]
-    ; RV64I-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
-    ; RV64I-NEXT: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[AND8]], [[AND2]](s64)
-    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 51
-    ; RV64I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
-    ; RV64I-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[LSHR4]], [[ANYEXT4]]
-    ; RV64I-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[ANYEXT4]]
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[AND9]], [[AND10]]
-    ; RV64I-NEXT: [[AND11:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C1]]
-    ; RV64I-NEXT: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[AND11]], [[AND4]](s64)
+    ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[OR2]], [[C1]]
+    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C]](s64)
+    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 85
+    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[LSHR3]], [[C4]]
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[OR2]], [[AND4]]
+    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
+    ; RV64I-NEXT: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[AND5]], [[C2]](s64)
+    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 51
+    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[LSHR4]], [[C5]]
+    ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C5]]
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[AND6]], [[AND7]]
+    ; RV64I-NEXT: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[ADD]], [[C3]](s64)
     ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[LSHR5]], [[ADD]]
-    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
-    ; RV64I-NEXT: [[ANYEXT5:%[0-9]+]]:_(s64) = G_ANYEXT [[C6]](s32)
-    ; RV64I-NEXT: [[AND12:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[ANYEXT5]]
-    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[ANYEXT]](s64)
-    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND12]], [[COPY1]]
-    ; RV64I-NEXT: [[ANYEXT6:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
-    ; RV64I-NEXT: [[AND13:%[0-9]+]]:_(s64) = G_AND [[ANYEXT6]], [[C1]]
-    ; RV64I-NEXT: [[AND14:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C1]]
-    ; RV64I-NEXT: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[AND14]], [[AND13]](s64)
-    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-    ; RV64I-NEXT: [[ANYEXT7:%[0-9]+]]:_(s64) = G_ANYEXT [[C8]](s32)
-    ; RV64I-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[ANYEXT7]], [[LSHR6]]
+    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
+    ; RV64I-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C6]]
+    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND8]], [[C]]
+    ; RV64I-NEXT: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[MUL]], [[C7]](s64)
+    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; RV64I-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C8]], [[LSHR6]]
     ; RV64I-NEXT: $x10 = COPY [[SUB1]](s64)
     ; RV64I-NEXT: PseudoRET implicit $x10
     ;
@@ -72,9 +57,8 @@ body:             |
     ; RV64ZBB-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
     ; RV64ZBB-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
     ; RV64ZBB-NEXT: [[CLZW:%[0-9]+]]:_(s64) = G_CLZW [[AND]]
-    ; RV64ZBB-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
-    ; RV64ZBB-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64ZBB-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[CLZW]], [[ANYEXT]]
+    ; RV64ZBB-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
+    ; RV64ZBB-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[CLZW]], [[C1]]
     ; RV64ZBB-NEXT: $x10 = COPY [[SUB]](s64)
     ; RV64ZBB-NEXT: PseudoRET implicit $x10
     %1:_(s64) = COPY $x10
@@ -95,58 +79,44 @@ body:             |
     ; RV64I: liveins: $x10
     ; RV64I-NEXT: {{  $}}
     ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535
-    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
-    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[AND]](s64)
+    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
+    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C]](s64)
     ; RV64I-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], [[LSHR]]
-    ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
-    ; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[ANYEXT1]], [[C1]]
-    ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[OR]], [[C1]]
-    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[AND2]](s64)
+    ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[OR]], [[C1]]
+    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[C2]](s64)
     ; RV64I-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[OR]], [[LSHR1]]
-    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; RV64I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
-    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[ANYEXT2]], [[C1]]
-    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[OR1]], [[C1]]
-    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND5]], [[AND4]](s64)
+    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[OR1]], [[C1]]
+    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[C3]](s64)
     ; RV64I-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[OR1]], [[LSHR2]]
-    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-    ; RV64I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
-    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[ANYEXT3]], [[C1]]
-    ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[OR2]], [[C1]]
-    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND7]], [[AND6]](s64)
+    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[OR2]], [[C1]]
+    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C4]](s64)
     ; RV64I-NEXT: [[OR3:%[0-9]+]]:_(s64) = G_OR [[OR2]], [[LSHR3]]
-    ; RV64I-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[OR3]], [[C1]]
-    ; RV64I-NEXT: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[AND8]], [[AND]](s64)
-    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 21845
-    ; RV64I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
-    ; RV64I-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[LSHR4]], [[ANYEXT4]]
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[OR3]], [[AND9]]
-    ; RV64I-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
-    ; RV64I-NEXT: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[AND10]], [[AND2]](s64)
-    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 13107
-    ; RV64I-NEXT: [[ANYEXT5:%[0-9]+]]:_(s64) = G_ANYEXT [[C6]](s32)
-    ; RV64I-NEXT: [[AND11:%[0-9]+]]:_(s64) = G_AND [[LSHR5]], [[ANYEXT5]]
-    ; RV64I-NEXT: [[AND12:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[ANYEXT5]]
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[AND11]], [[AND12]]
-    ; RV64I-NEXT: [[AND13:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C1]]
-    ; RV64I-NEXT: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[AND13]], [[AND4]](s64)
+    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[OR3]], [[C1]]
+    ; RV64I-NEXT: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[AND4]], [[C]](s64)
+    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 21845
+    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[LSHR4]], [[C5]]
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[OR3]], [[AND5]]
+    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
+    ; RV64I-NEXT: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[AND6]], [[C2]](s64)
+    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 13107
+    ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[LSHR5]], [[C6]]
+    ; RV64I-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C6]]
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[AND7]], [[AND8]]
+    ; RV64I-NEXT: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[ADD]], [[C3]](s64)
     ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[LSHR6]], [[ADD]]
-    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 3855
-    ; RV64I-NEXT: [[ANYEXT6:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
-    ; RV64I-NEXT: [[AND14:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[ANYEXT6]]
-    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 257
-    ; RV64I-NEXT: [[ANYEXT7:%[0-9]+]]:_(s64) = G_ANYEXT [[C8]](s32)
-    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND14]], [[ANYEXT7]]
-    ; RV64I-NEXT: [[AND15:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C1]]
-    ; RV64I-NEXT: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[AND15]], [[AND6]](s64)
-    ; RV64I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; RV64I-NEXT: [[ANYEXT8:%[0-9]+]]:_(s64) = G_ANYEXT [[C9]](s32)
-    ; RV64I-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[ANYEXT8]], [[LSHR7]]
+    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 3855
+    ; RV64I-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C7]]
+    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 257
+    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND9]], [[C8]]
+    ; RV64I-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C1]]
+    ; RV64I-NEXT: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[AND10]], [[C4]](s64)
+    ; RV64I-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; RV64I-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C9]], [[LSHR7]]
     ; RV64I-NEXT: $x10 = COPY [[SUB1]](s64)
     ; RV64I-NEXT: PseudoRET implicit $x10
     ;
@@ -157,9 +127,8 @@ body:             |
     ; RV64ZBB-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535
     ; RV64ZBB-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
     ; RV64ZBB-NEXT: [[CLZW:%[0-9]+]]:_(s64) = G_CLZW [[AND]]
-    ; RV64ZBB-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; RV64ZBB-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64ZBB-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[CLZW]], [[ANYEXT]]
+    ; RV64ZBB-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; RV64ZBB-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[CLZW]], [[C1]]
     ; RV64ZBB-NEXT: $x10 = COPY [[SUB]](s64)
     ; RV64ZBB-NEXT: PseudoRET implicit $x10
     %1:_(s64) = COPY $x10
@@ -180,61 +149,49 @@ body:             |
     ; RV64I: liveins: $x10
     ; RV64I-NEXT: {{  $}}
     ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
     ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
-    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[ZEXT]](s64)
+    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C]](s64)
     ; RV64I-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], [[LSHR]]
-    ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+    ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
     ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[OR]], [[C1]]
-    ; RV64I-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[C2]](s32)
-    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[ZEXT1]](s64)
+    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[C2]](s64)
     ; RV64I-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[OR]], [[LSHR1]]
-    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
     ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[OR1]], [[C1]]
-    ; RV64I-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[C3]](s32)
-    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[ZEXT2]](s64)
+    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[C3]](s64)
     ; RV64I-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[OR1]], [[LSHR2]]
-    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
     ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[OR2]], [[C1]]
-    ; RV64I-NEXT: [[ZEXT3:%[0-9]+]]:_(s64) = G_ZEXT [[C4]](s32)
-    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[ZEXT3]](s64)
+    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C4]](s64)
     ; RV64I-NEXT: [[OR3:%[0-9]+]]:_(s64) = G_OR [[OR2]], [[LSHR3]]
-    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[OR3]], [[C1]]
-    ; RV64I-NEXT: [[ZEXT4:%[0-9]+]]:_(s64) = G_ZEXT [[C5]](s32)
-    ; RV64I-NEXT: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[AND4]], [[ZEXT4]](s64)
+    ; RV64I-NEXT: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[AND4]], [[C5]](s64)
     ; RV64I-NEXT: [[OR4:%[0-9]+]]:_(s64) = G_OR [[OR3]], [[LSHR4]]
     ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[OR4]], [[C1]]
-    ; RV64I-NEXT: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[AND5]], [[ZEXT]](s64)
-    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 1431655765
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C6]](s32)
-    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[LSHR5]], [[ANYEXT]]
+    ; RV64I-NEXT: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[AND5]], [[C]](s64)
+    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 1431655765
+    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[LSHR5]], [[C6]]
     ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[OR4]], [[AND6]]
     ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
-    ; RV64I-NEXT: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[AND7]], [[ZEXT1]](s64)
-    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 858993459
-    ; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
-    ; RV64I-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[LSHR6]], [[ANYEXT1]]
-    ; RV64I-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[ANYEXT1]]
+    ; RV64I-NEXT: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[AND7]], [[C2]](s64)
+    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 858993459
+    ; RV64I-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[LSHR6]], [[C7]]
+    ; RV64I-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C7]]
     ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[AND8]], [[AND9]]
-    ; RV64I-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C1]]
-    ; RV64I-NEXT: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[AND10]], [[ZEXT2]](s64)
+    ; RV64I-NEXT: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[ADD]], [[C3]](s64)
     ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[LSHR7]], [[ADD]]
-    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
-    ; RV64I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C8]](s32)
-    ; RV64I-NEXT: [[AND11:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[ANYEXT2]]
-    ; RV64I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
-    ; RV64I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
-    ; RV64I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C9]](s32)
-    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND11]], [[ANYEXT3]]
-    ; RV64I-NEXT: [[AND12:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C1]]
-    ; RV64I-NEXT: [[ZEXT5:%[0-9]+]]:_(s64) = G_ZEXT [[C10]](s32)
-    ; RV64I-NEXT: [[LSHR8:%[0-9]+]]:_(s64) = G_LSHR [[AND12]], [[ZEXT5]](s64)
-    ; RV64I-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
-    ; RV64I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C11]](s32)
-    ; RV64I-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[ANYEXT4]], [[LSHR8]]
+    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 252645135
+    ; RV64I-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C8]]
+    ; RV64I-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 16843009
+    ; RV64I-NEXT: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
+    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND10]], [[C9]]
+    ; RV64I-NEXT: [[AND11:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C1]]
+    ; RV64I-NEXT: [[LSHR8:%[0-9]+]]:_(s64) = G_LSHR [[AND11]], [[C10]](s64)
+    ; RV64I-NEXT: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
+    ; RV64I-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C11]], [[LSHR8]]
     ; RV64I-NEXT: $x10 = COPY [[SUB1]](s64)
     ; RV64I-NEXT: PseudoRET implicit $x10
     ;
@@ -326,54 +283,39 @@ body:             |
     ; RV64I: liveins: $x10
     ; RV64I-NEXT: {{  $}}
     ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
-    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
-    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[AND]](s64)
+    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
+    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C]](s64)
     ; RV64I-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], [[LSHR]]
-    ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
-    ; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[ANYEXT1]], [[C1]]
-    ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[OR]], [[C1]]
-    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[AND2]](s64)
+    ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[OR]], [[C1]]
+    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[C2]](s64)
     ; RV64I-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[OR]], [[LSHR1]]
-    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; RV64I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
-    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[ANYEXT2]], [[C1]]
-    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[OR1]], [[C1]]
-    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND5]], [[AND4]](s64)
+    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[OR1]], [[C1]]
+    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[C3]](s64)
     ; RV64I-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[OR1]], [[LSHR2]]
-    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[OR2]], [[C1]]
-    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND6]], [[AND]](s64)
-    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 85
-    ; RV64I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
-    ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[LSHR3]], [[ANYEXT3]]
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[OR2]], [[AND7]]
-    ; RV64I-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
-    ; RV64I-NEXT: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[AND8]], [[AND2]](s64)
-    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 51
-    ; RV64I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
-    ; RV64I-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[LSHR4]], [[ANYEXT4]]
-    ; RV64I-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[ANYEXT4]]
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[AND9]], [[AND10]]
-    ; RV64I-NEXT: [[AND11:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C1]]
-    ; RV64I-NEXT: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[AND11]], [[AND4]](s64)
+    ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[OR2]], [[C1]]
+    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C]](s64)
+    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 85
+    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[LSHR3]], [[C4]]
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[OR2]], [[AND4]]
+    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
+    ; RV64I-NEXT: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[AND5]], [[C2]](s64)
+    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 51
+    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[LSHR4]], [[C5]]
+    ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C5]]
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[AND6]], [[AND7]]
+    ; RV64I-NEXT: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[ADD]], [[C3]](s64)
     ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[LSHR5]], [[ADD]]
-    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
-    ; RV64I-NEXT: [[ANYEXT5:%[0-9]+]]:_(s64) = G_ANYEXT [[C6]](s32)
-    ; RV64I-NEXT: [[AND12:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[ANYEXT5]]
-    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[ANYEXT]](s64)
-    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND12]], [[COPY1]]
-    ; RV64I-NEXT: [[ANYEXT6:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
-    ; RV64I-NEXT: [[AND13:%[0-9]+]]:_(s64) = G_AND [[ANYEXT6]], [[C1]]
-    ; RV64I-NEXT: [[AND14:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C1]]
-    ; RV64I-NEXT: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[AND14]], [[AND13]](s64)
-    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-    ; RV64I-NEXT: [[ANYEXT7:%[0-9]+]]:_(s64) = G_ANYEXT [[C8]](s32)
-    ; RV64I-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[ANYEXT7]], [[LSHR6]]
+    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
+    ; RV64I-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C6]]
+    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND8]], [[C]]
+    ; RV64I-NEXT: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[MUL]], [[C7]](s64)
+    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; RV64I-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C8]], [[LSHR6]]
     ; RV64I-NEXT: $x10 = COPY [[SUB1]](s64)
     ; RV64I-NEXT: PseudoRET implicit $x10
     ;
@@ -384,9 +326,8 @@ body:             |
     ; RV64ZBB-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
     ; RV64ZBB-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
     ; RV64ZBB-NEXT: [[CLZW:%[0-9]+]]:_(s64) = G_CLZW [[AND]]
-    ; RV64ZBB-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
-    ; RV64ZBB-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64ZBB-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[CLZW]], [[ANYEXT]]
+    ; RV64ZBB-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
+    ; RV64ZBB-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[CLZW]], [[C1]]
     ; RV64ZBB-NEXT: $x10 = COPY [[SUB]](s64)
     ; RV64ZBB-NEXT: PseudoRET implicit $x10
     %1:_(s64) = COPY $x10
@@ -407,58 +348,44 @@ body:             |
     ; RV64I: liveins: $x10
     ; RV64I-NEXT: {{  $}}
     ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535
-    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
-    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[AND]](s64)
+    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
+    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C]](s64)
     ; RV64I-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], [[LSHR]]
-    ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
-    ; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[ANYEXT1]], [[C1]]
-    ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[OR]], [[C1]]
-    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[AND2]](s64)
+    ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[OR]], [[C1]]
+    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[C2]](s64)
     ; RV64I-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[OR]], [[LSHR1]]
-    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; RV64I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
-    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[ANYEXT2]], [[C1]]
-    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[OR1]], [[C1]]
-    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND5]], [[AND4]](s64)
+    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[OR1]], [[C1]]
+    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[C3]](s64)
     ; RV64I-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[OR1]], [[LSHR2]]
-    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-    ; RV64I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
-    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[ANYEXT3]], [[C1]]
-    ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[OR2]], [[C1]]
-    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND7]], [[AND6]](s64)
+    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[OR2]], [[C1]]
+    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C4]](s64)
     ; RV64I-NEXT: [[OR3:%[0-9]+]]:_(s64) = G_OR [[OR2]], [[LSHR3]]
-    ; RV64I-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[OR3]], [[C1]]
-    ; RV64I-NEXT: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[AND8]], [[AND]](s64)
-    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 21845
-    ; RV64I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
-    ; RV64I-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[LSHR4]], [[ANYEXT4]]
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[OR3]], [[AND9]]
-    ; RV64I-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
-    ; RV64I-NEXT: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[AND10]], [[AND2]](s64)
-    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 13107
-    ; RV64I-NEXT: [[ANYEXT5:%[0-9]+]]:_(s64) = G_ANYEXT [[C6]](s32)
-    ; RV64I-NEXT: [[AND11:%[0-9]+]]:_(s64) = G_AND [[LSHR5]], [[ANYEXT5]]
-    ; RV64I-NEXT: [[AND12:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[ANYEXT5]]
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[AND11]], [[AND12]]
-    ; RV64I-NEXT: [[AND13:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C1]]
-    ; RV64I-NEXT: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[AND13]], [[AND4]](s64)
+    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[OR3]], [[C1]]
+    ; RV64I-NEXT: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[AND4]], [[C]](s64)
+    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 21845
+    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[LSHR4]], [[C5]]
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[OR3]], [[AND5]]
+    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
+    ; RV64I-NEXT: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[AND6]], [[C2]](s64)
+    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 13107
+    ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[LSHR5]], [[C6]]
+    ; RV64I-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C6]]
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[AND7]], [[AND8]]
+    ; RV64I-NEXT: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[ADD]], [[C3]](s64)
     ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[LSHR6]], [[ADD]]
-    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 3855
-    ; RV64I-NEXT: [[ANYEXT6:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
-    ; RV64I-NEXT: [[AND14:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[ANYEXT6]]
-    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 257
-    ; RV64I-NEXT: [[ANYEXT7:%[0-9]+]]:_(s64) = G_ANYEXT [[C8]](s32)
-    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND14]], [[ANYEXT7]]
-    ; RV64I-NEXT: [[AND15:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C1]]
-    ; RV64I-NEXT: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[AND15]], [[AND6]](s64)
-    ; RV64I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; RV64I-NEXT: [[ANYEXT8:%[0-9]+]]:_(s64) = G_ANYEXT [[C9]](s32)
-    ; RV64I-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[ANYEXT8]], [[LSHR7]]
+    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 3855
+    ; RV64I-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C7]]
+    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 257
+    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND9]], [[C8]]
+    ; RV64I-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C1]]
+    ; RV64I-NEXT: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[AND10]], [[C4]](s64)
+    ; RV64I-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; RV64I-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C9]], [[LSHR7]]
     ; RV64I-NEXT: $x10 = COPY [[SUB1]](s64)
     ; RV64I-NEXT: PseudoRET implicit $x10
     ;
@@ -469,9 +396,8 @@ body:             |
     ; RV64ZBB-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535
     ; RV64ZBB-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
     ; RV64ZBB-NEXT: [[CLZW:%[0-9]+]]:_(s64) = G_CLZW [[AND]]
-    ; RV64ZBB-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; RV64ZBB-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64ZBB-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[CLZW]], [[ANYEXT]]
+    ; RV64ZBB-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; RV64ZBB-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[CLZW]], [[C1]]
     ; RV64ZBB-NEXT: $x10 = COPY [[SUB]](s64)
     ; RV64ZBB-NEXT: PseudoRET implicit $x10
     %1:_(s64) = COPY $x10
@@ -492,61 +418,49 @@ body:             |
     ; RV64I: liveins: $x10
     ; RV64I-NEXT: {{  $}}
     ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
     ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
-    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[ZEXT]](s64)
+    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C]](s64)
     ; RV64I-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], [[LSHR]]
-    ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+    ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
     ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[OR]], [[C1]]
-    ; RV64I-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[C2]](s32)
-    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[ZEXT1]](s64)
+    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[C2]](s64)
     ; RV64I-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[OR]], [[LSHR1]]
-    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
     ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[OR1]], [[C1]]
-    ; RV64I-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[C3]](s32)
-    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[ZEXT2]](s64)
+    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[C3]](s64)
     ; RV64I-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[OR1]], [[LSHR2]]
-    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
     ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[OR2]], [[C1]]
-    ; RV64I-NEXT: [[ZEXT3:%[0-9]+]]:_(s64) = G_ZEXT [[C4]](s32)
-    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[ZEXT3]](s64)
+    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C4]](s64)
     ; RV64I-NEXT: [[OR3:%[0-9]+]]:_(s64) = G_OR [[OR2]], [[LSHR3]]
-    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[OR3]], [[C1]]
-    ; RV64I-NEXT: [[ZEXT4:%[0-9]+]]:_(s64) = G_ZEXT [[C5]](s32)
-    ; RV64I-NEXT: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[AND4]], [[ZEXT4]](s64)
+    ; RV64I-NEXT: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[AND4]], [[C5]](s64)
     ; RV64I-NEXT: [[OR4:%[0-9]+]]:_(s64) = G_OR [[OR3]], [[LSHR4]]
     ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[OR4]], [[C1]]
-    ; RV64I-NEXT: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[AND5]], [[ZEXT]](s64)
-    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 1431655765
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C6]](s32)
-    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[LSHR5]], [[ANYEXT]]
+    ; RV64I-NEXT: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[AND5]], [[C]](s64)
+    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 1431655765
+    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[LSHR5]], [[C6]]
     ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[OR4]], [[AND6]]
     ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
-    ; RV64I-NEXT: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[AND7]], [[ZEXT1]](s64)
-    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 858993459
-    ; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
-    ; RV64I-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[LSHR6]], [[ANYEXT1]]
-    ; RV64I-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[ANYEXT1]]
+    ; RV64I-NEXT: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[AND7]], [[C2]](s64)
+    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 858993459
+    ; RV64I-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[LSHR6]], [[C7]]
+    ; RV64I-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C7]]
     ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[AND8]], [[AND9]]
-    ; RV64I-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C1]]
-    ; RV64I-NEXT: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[AND10]], [[ZEXT2]](s64)
+    ; RV64I-NEXT: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[ADD]], [[C3]](s64)
     ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[LSHR7]], [[ADD]]
-    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
-    ; RV64I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C8]](s32)
-    ; RV64I-NEXT: [[AND11:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[ANYEXT2]]
-    ; RV64I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
-    ; RV64I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
-    ; RV64I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C9]](s32)
-    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND11]], [[ANYEXT3]]
-    ; RV64I-NEXT: [[AND12:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C1]]
-    ; RV64I-NEXT: [[ZEXT5:%[0-9]+]]:_(s64) = G_ZEXT [[C10]](s32)
-    ; RV64I-NEXT: [[LSHR8:%[0-9]+]]:_(s64) = G_LSHR [[AND12]], [[ZEXT5]](s64)
-    ; RV64I-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
-    ; RV64I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C11]](s32)
-    ; RV64I-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[ANYEXT4]], [[LSHR8]]
+    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 252645135
+    ; RV64I-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C8]]
+    ; RV64I-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 16843009
+    ; RV64I-NEXT: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
+    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND10]], [[C9]]
+    ; RV64I-NEXT: [[AND11:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C1]]
+    ; RV64I-NEXT: [[LSHR8:%[0-9]+]]:_(s64) = G_LSHR [[AND11]], [[C10]](s64)
+    ; RV64I-NEXT: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
+    ; RV64I-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C11]], [[LSHR8]]
     ; RV64I-NEXT: $x10 = COPY [[SUB1]](s64)
     ; RV64I-NEXT: PseudoRET implicit $x10
     ;

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir
index ff26a846479513..1493514394bd53 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir
@@ -14,42 +14,28 @@ body:             |
     ; RV64I: liveins: $x10
     ; RV64I-NEXT: {{  $}}
     ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
-    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
-    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[AND]](s64)
-    ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 85
-    ; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[ANYEXT1]]
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[AND2]]
-    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
-    ; RV64I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
-    ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[ANYEXT2]], [[C1]]
-    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
-    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND4]], [[AND3]](s64)
-    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 51
-    ; RV64I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
-    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[ANYEXT3]]
-    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[ANYEXT3]]
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[AND5]], [[AND6]]
-    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; RV64I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
-    ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[ANYEXT4]], [[C1]]
-    ; RV64I-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C1]]
-    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND8]], [[AND7]](s64)
+    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
+    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C]](s64)
+    ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 85
+    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[C2]]
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[AND1]]
+    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
+    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[C3]](s64)
+    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 51
+    ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[C4]]
+    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C4]]
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[AND3]], [[AND4]]
+    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[ADD]], [[C5]](s64)
     ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[LSHR2]], [[ADD]]
-    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
-    ; RV64I-NEXT: [[ANYEXT5:%[0-9]+]]:_(s64) = G_ANYEXT [[C6]](s32)
-    ; RV64I-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[ANYEXT5]]
-    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[ANYEXT]](s64)
-    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND9]], [[COPY1]]
-    ; RV64I-NEXT: [[ANYEXT6:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
-    ; RV64I-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[ANYEXT6]], [[C1]]
-    ; RV64I-NEXT: [[AND11:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C1]]
-    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND11]], [[AND10]](s64)
+    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
+    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C6]]
+    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND5]], [[C]]
+    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[MUL]], [[C7]](s64)
     ; RV64I-NEXT: $x10 = COPY [[LSHR3]](s64)
     ; RV64I-NEXT: PseudoRET implicit $x10
     ;
@@ -81,43 +67,30 @@ body:             |
     ; RV64I: liveins: $x10
     ; RV64I-NEXT: {{  $}}
     ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535
-    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
-    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[AND]](s64)
-    ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 21845
-    ; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[ANYEXT1]]
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[AND2]]
-    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
-    ; RV64I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
-    ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[ANYEXT2]], [[C1]]
-    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
-    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND4]], [[AND3]](s64)
-    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 13107
-    ; RV64I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
-    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[ANYEXT3]]
-    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[ANYEXT3]]
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[AND5]], [[AND6]]
-    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; RV64I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
-    ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[ANYEXT4]], [[C1]]
-    ; RV64I-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C1]]
-    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND8]], [[AND7]](s64)
+    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
+    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C]](s64)
+    ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 21845
+    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[C2]]
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[AND1]]
+    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
+    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[C3]](s64)
+    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 13107
+    ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[C4]]
+    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C4]]
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[AND3]], [[AND4]]
+    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[ADD]], [[C5]](s64)
     ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[LSHR2]], [[ADD]]
-    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 3855
-    ; RV64I-NEXT: [[ANYEXT5:%[0-9]+]]:_(s64) = G_ANYEXT [[C6]](s32)
-    ; RV64I-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[ANYEXT5]]
-    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 257
-    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-    ; RV64I-NEXT: [[ANYEXT6:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
-    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND9]], [[ANYEXT6]]
-    ; RV64I-NEXT: [[ANYEXT7:%[0-9]+]]:_(s64) = G_ANYEXT [[C8]](s32)
-    ; RV64I-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[ANYEXT7]], [[C1]]
-    ; RV64I-NEXT: [[AND11:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C1]]
-    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND11]], [[AND10]](s64)
+    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 3855
+    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C6]]
+    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 257
+    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND5]], [[C7]]
+    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C1]]
+    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND6]], [[C8]](s64)
     ; RV64I-NEXT: $x10 = COPY [[LSHR3]](s64)
     ; RV64I-NEXT: PseudoRET implicit $x10
     ;
@@ -149,39 +122,30 @@ body:             |
     ; RV64I: liveins: $x10
     ; RV64I-NEXT: {{  $}}
     ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
     ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
-    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[ZEXT]](s64)
-    ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1431655765
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[ANYEXT]]
+    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C]](s64)
+    ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1431655765
+    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[C2]]
     ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[AND1]]
-    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
     ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
-    ; RV64I-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[C3]](s32)
-    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[ZEXT1]](s64)
-    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 858993459
-    ; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
-    ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[ANYEXT1]]
-    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[ANYEXT1]]
+    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[C3]](s64)
+    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 858993459
+    ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[C4]]
+    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C4]]
     ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[AND3]], [[AND4]]
-    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C1]]
-    ; RV64I-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[C5]](s32)
-    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND5]], [[ZEXT2]](s64)
+    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[ADD]], [[C5]](s64)
     ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[LSHR2]], [[ADD]]
-    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
-    ; RV64I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C6]](s32)
-    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[ANYEXT2]]
-    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
-    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
-    ; RV64I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
-    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND6]], [[ANYEXT3]]
-    ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C1]]
-    ; RV64I-NEXT: [[ZEXT3:%[0-9]+]]:_(s64) = G_ZEXT [[C8]](s32)
-    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND7]], [[ZEXT3]](s64)
+    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 252645135
+    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C6]]
+    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16843009
+    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
+    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND5]], [[C7]]
+    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C1]]
+    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND6]], [[C8]](s64)
     ; RV64I-NEXT: $x10 = COPY [[LSHR3]](s64)
     ; RV64I-NEXT: PseudoRET implicit $x10
     ;

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir
index f4c05d2211d4cb..252e79280af610 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir
@@ -14,47 +14,32 @@ body:             |
     ; RV64I: liveins: $x10
     ; RV64I-NEXT: {{  $}}
     ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY]], [[ANYEXT]]
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[ANYEXT]]
+    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY]], [[C]]
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[C]]
     ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[ADD]]
-    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
-    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[ANYEXT1]], [[C2]]
-    ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C2]]
-    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[AND1]](s64)
-    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 85
-    ; RV64I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
-    ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[ANYEXT2]]
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[AND]], [[AND3]]
-    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
-    ; RV64I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
-    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[ANYEXT3]], [[C2]]
-    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C2]]
-    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND5]], [[AND4]](s64)
-    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 51
-    ; RV64I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
-    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[ANYEXT4]]
-    ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[ANYEXT4]]
-    ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[AND6]], [[AND7]]
-    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; RV64I-NEXT: [[ANYEXT5:%[0-9]+]]:_(s64) = G_ANYEXT [[C6]](s32)
-    ; RV64I-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[ANYEXT5]], [[C2]]
-    ; RV64I-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C2]]
-    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND9]], [[AND8]](s64)
+    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C2]]
+    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[C1]](s64)
+    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 85
+    ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[C3]]
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[AND]], [[AND2]]
+    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C2]]
+    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C4]](s64)
+    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 51
+    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[C5]]
+    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C5]]
+    ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[AND4]], [[AND5]]
+    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[ADD1]], [[C6]](s64)
     ; RV64I-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[LSHR2]], [[ADD1]]
-    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
-    ; RV64I-NEXT: [[ANYEXT6:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
-    ; RV64I-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[ADD2]], [[ANYEXT6]]
-    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[ANYEXT1]](s64)
-    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND10]], [[COPY1]]
-    ; RV64I-NEXT: [[ANYEXT7:%[0-9]+]]:_(s64) = G_ANYEXT [[C8]](s32)
-    ; RV64I-NEXT: [[AND11:%[0-9]+]]:_(s64) = G_AND [[ANYEXT7]], [[C2]]
-    ; RV64I-NEXT: [[AND12:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C2]]
-    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND12]], [[AND11]](s64)
+    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
+    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[ADD2]], [[C7]]
+    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND6]], [[C1]]
+    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[MUL]], [[C8]](s64)
     ; RV64I-NEXT: $x10 = COPY [[LSHR3]](s64)
     ; RV64I-NEXT: PseudoRET implicit $x10
     ;
@@ -62,9 +47,8 @@ body:             |
     ; RV64ZBB: liveins: $x10
     ; RV64ZBB-NEXT: {{  $}}
     ; RV64ZBB-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; RV64ZBB-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 256
-    ; RV64ZBB-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64ZBB-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], [[ANYEXT]]
+    ; RV64ZBB-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 256
+    ; RV64ZBB-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], [[C]]
     ; RV64ZBB-NEXT: [[CTZW:%[0-9]+]]:_(s64) = G_CTZW [[OR]]
     ; RV64ZBB-NEXT: $x10 = COPY [[CTZW]](s64)
     ; RV64ZBB-NEXT: PseudoRET implicit $x10
@@ -86,48 +70,34 @@ body:             |
     ; RV64I: liveins: $x10
     ; RV64I-NEXT: {{  $}}
     ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY]], [[ANYEXT]]
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[ANYEXT]]
+    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY]], [[C]]
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[C]]
     ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[ADD]]
-    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535
-    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[ANYEXT1]], [[C2]]
-    ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C2]]
-    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[AND1]](s64)
-    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 21845
-    ; RV64I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
-    ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[ANYEXT2]]
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[AND]], [[AND3]]
-    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
-    ; RV64I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
-    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[ANYEXT3]], [[C2]]
-    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C2]]
-    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND5]], [[AND4]](s64)
-    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 13107
-    ; RV64I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
-    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[ANYEXT4]]
-    ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[ANYEXT4]]
-    ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[AND6]], [[AND7]]
-    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; RV64I-NEXT: [[ANYEXT5:%[0-9]+]]:_(s64) = G_ANYEXT [[C6]](s32)
-    ; RV64I-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[ANYEXT5]], [[C2]]
-    ; RV64I-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C2]]
-    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND9]], [[AND8]](s64)
+    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C2]]
+    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[C1]](s64)
+    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 21845
+    ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[C3]]
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[AND]], [[AND2]]
+    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C2]]
+    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C4]](s64)
+    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 13107
+    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[C5]]
+    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C5]]
+    ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[AND4]], [[AND5]]
+    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[ADD1]], [[C6]](s64)
     ; RV64I-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[LSHR2]], [[ADD1]]
-    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 3855
-    ; RV64I-NEXT: [[ANYEXT6:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
-    ; RV64I-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[ADD2]], [[ANYEXT6]]
-    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 257
-    ; RV64I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-    ; RV64I-NEXT: [[ANYEXT7:%[0-9]+]]:_(s64) = G_ANYEXT [[C8]](s32)
-    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND10]], [[ANYEXT7]]
-    ; RV64I-NEXT: [[ANYEXT8:%[0-9]+]]:_(s64) = G_ANYEXT [[C9]](s32)
-    ; RV64I-NEXT: [[AND11:%[0-9]+]]:_(s64) = G_AND [[ANYEXT8]], [[C2]]
-    ; RV64I-NEXT: [[AND12:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C2]]
-    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND12]], [[AND11]](s64)
+    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 3855
+    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[ADD2]], [[C7]]
+    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 257
+    ; RV64I-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND6]], [[C8]]
+    ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C2]]
+    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND7]], [[C9]](s64)
     ; RV64I-NEXT: $x10 = COPY [[LSHR3]](s64)
     ; RV64I-NEXT: PseudoRET implicit $x10
     ;
@@ -135,9 +105,8 @@ body:             |
     ; RV64ZBB: liveins: $x10
     ; RV64ZBB-NEXT: {{  $}}
     ; RV64ZBB-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; RV64ZBB-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65536
-    ; RV64ZBB-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64ZBB-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], [[ANYEXT]]
+    ; RV64ZBB-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 65536
+    ; RV64ZBB-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], [[C]]
     ; RV64ZBB-NEXT: [[CTZW:%[0-9]+]]:_(s64) = G_CTZW [[OR]]
     ; RV64ZBB-NEXT: $x10 = COPY [[CTZW]](s64)
     ; RV64ZBB-NEXT: PseudoRET implicit $x10
@@ -159,44 +128,34 @@ body:             |
     ; RV64I: liveins: $x10
     ; RV64I-NEXT: {{  $}}
     ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY]], [[ANYEXT]]
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[ANYEXT]]
+    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY]], [[C]]
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[C]]
     ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[ADD]]
-    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
     ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C2]]
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C1]](s32)
-    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[ZEXT]](s64)
-    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1431655765
-    ; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
-    ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[ANYEXT1]]
+    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[C1]](s64)
+    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1431655765
+    ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[C3]]
     ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[AND]], [[AND2]]
-    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
     ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C2]]
-    ; RV64I-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[C4]](s32)
-    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[ZEXT1]](s64)
-    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 858993459
-    ; RV64I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
-    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[ANYEXT2]]
-    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[ANYEXT2]]
+    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C4]](s64)
+    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 858993459
+    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[C5]]
+    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C5]]
     ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[AND4]], [[AND5]]
-    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C2]]
-    ; RV64I-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[C6]](s32)
-    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND6]], [[ZEXT2]](s64)
+    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[ADD1]], [[C6]](s64)
     ; RV64I-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[LSHR2]], [[ADD1]]
-    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
-    ; RV64I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
-    ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[ADD2]], [[ANYEXT3]]
-    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
-    ; RV64I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
-    ; RV64I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C8]](s32)
-    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND7]], [[ANYEXT4]]
-    ; RV64I-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C2]]
-    ; RV64I-NEXT: [[ZEXT3:%[0-9]+]]:_(s64) = G_ZEXT [[C9]](s32)
-    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND8]], [[ZEXT3]](s64)
+    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 252645135
+    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[ADD2]], [[C7]]
+    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 16843009
+    ; RV64I-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
+    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND6]], [[C8]]
+    ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C2]]
+    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND7]], [[C9]](s64)
     ; RV64I-NEXT: $x10 = COPY [[LSHR3]](s64)
     ; RV64I-NEXT: PseudoRET implicit $x10
     ;
@@ -275,47 +234,32 @@ body:             |
     ; RV64I: liveins: $x10
     ; RV64I-NEXT: {{  $}}
     ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY]], [[ANYEXT]]
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[ANYEXT]]
+    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY]], [[C]]
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[C]]
     ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[ADD]]
-    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
-    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[ANYEXT1]], [[C2]]
-    ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C2]]
-    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[AND1]](s64)
-    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 85
-    ; RV64I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
-    ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[ANYEXT2]]
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[AND]], [[AND3]]
-    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
-    ; RV64I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
-    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[ANYEXT3]], [[C2]]
-    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C2]]
-    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND5]], [[AND4]](s64)
-    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 51
-    ; RV64I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
-    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[ANYEXT4]]
-    ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[ANYEXT4]]
-    ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[AND6]], [[AND7]]
-    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; RV64I-NEXT: [[ANYEXT5:%[0-9]+]]:_(s64) = G_ANYEXT [[C6]](s32)
-    ; RV64I-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[ANYEXT5]], [[C2]]
-    ; RV64I-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C2]]
-    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND9]], [[AND8]](s64)
+    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C2]]
+    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[C1]](s64)
+    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 85
+    ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[C3]]
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[AND]], [[AND2]]
+    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C2]]
+    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C4]](s64)
+    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 51
+    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[C5]]
+    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C5]]
+    ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[AND4]], [[AND5]]
+    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[ADD1]], [[C6]](s64)
     ; RV64I-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[LSHR2]], [[ADD1]]
-    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
-    ; RV64I-NEXT: [[ANYEXT6:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
-    ; RV64I-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[ADD2]], [[ANYEXT6]]
-    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[ANYEXT1]](s64)
-    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND10]], [[COPY1]]
-    ; RV64I-NEXT: [[ANYEXT7:%[0-9]+]]:_(s64) = G_ANYEXT [[C8]](s32)
-    ; RV64I-NEXT: [[AND11:%[0-9]+]]:_(s64) = G_AND [[ANYEXT7]], [[C2]]
-    ; RV64I-NEXT: [[AND12:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C2]]
-    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND12]], [[AND11]](s64)
+    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
+    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[ADD2]], [[C7]]
+    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND6]], [[C1]]
+    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[MUL]], [[C8]](s64)
     ; RV64I-NEXT: $x10 = COPY [[LSHR3]](s64)
     ; RV64I-NEXT: PseudoRET implicit $x10
     ;
@@ -323,9 +267,8 @@ body:             |
     ; RV64ZBB: liveins: $x10
     ; RV64ZBB-NEXT: {{  $}}
     ; RV64ZBB-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; RV64ZBB-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 256
-    ; RV64ZBB-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64ZBB-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], [[ANYEXT]]
+    ; RV64ZBB-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 256
+    ; RV64ZBB-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], [[C]]
     ; RV64ZBB-NEXT: [[CTZW:%[0-9]+]]:_(s64) = G_CTZW [[OR]]
     ; RV64ZBB-NEXT: $x10 = COPY [[CTZW]](s64)
     ; RV64ZBB-NEXT: PseudoRET implicit $x10
@@ -347,48 +290,34 @@ body:             |
     ; RV64I: liveins: $x10
     ; RV64I-NEXT: {{  $}}
     ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY]], [[ANYEXT]]
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[ANYEXT]]
+    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY]], [[C]]
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[C]]
     ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[ADD]]
-    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535
-    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[ANYEXT1]], [[C2]]
-    ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C2]]
-    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[AND1]](s64)
-    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 21845
-    ; RV64I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
-    ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[ANYEXT2]]
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[AND]], [[AND3]]
-    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
-    ; RV64I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
-    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[ANYEXT3]], [[C2]]
-    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C2]]
-    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND5]], [[AND4]](s64)
-    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 13107
-    ; RV64I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
-    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[ANYEXT4]]
-    ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[ANYEXT4]]
-    ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[AND6]], [[AND7]]
-    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; RV64I-NEXT: [[ANYEXT5:%[0-9]+]]:_(s64) = G_ANYEXT [[C6]](s32)
-    ; RV64I-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[ANYEXT5]], [[C2]]
-    ; RV64I-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C2]]
-    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND9]], [[AND8]](s64)
+    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C2]]
+    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[C1]](s64)
+    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 21845
+    ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[C3]]
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[AND]], [[AND2]]
+    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C2]]
+    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C4]](s64)
+    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 13107
+    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[C5]]
+    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C5]]
+    ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[AND4]], [[AND5]]
+    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[ADD1]], [[C6]](s64)
     ; RV64I-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[LSHR2]], [[ADD1]]
-    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 3855
-    ; RV64I-NEXT: [[ANYEXT6:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
-    ; RV64I-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[ADD2]], [[ANYEXT6]]
-    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 257
-    ; RV64I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-    ; RV64I-NEXT: [[ANYEXT7:%[0-9]+]]:_(s64) = G_ANYEXT [[C8]](s32)
-    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND10]], [[ANYEXT7]]
-    ; RV64I-NEXT: [[ANYEXT8:%[0-9]+]]:_(s64) = G_ANYEXT [[C9]](s32)
-    ; RV64I-NEXT: [[AND11:%[0-9]+]]:_(s64) = G_AND [[ANYEXT8]], [[C2]]
-    ; RV64I-NEXT: [[AND12:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C2]]
-    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND12]], [[AND11]](s64)
+    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 3855
+    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[ADD2]], [[C7]]
+    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 257
+    ; RV64I-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND6]], [[C8]]
+    ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C2]]
+    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND7]], [[C9]](s64)
     ; RV64I-NEXT: $x10 = COPY [[LSHR3]](s64)
     ; RV64I-NEXT: PseudoRET implicit $x10
     ;
@@ -396,9 +325,8 @@ body:             |
     ; RV64ZBB: liveins: $x10
     ; RV64ZBB-NEXT: {{  $}}
     ; RV64ZBB-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; RV64ZBB-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65536
-    ; RV64ZBB-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64ZBB-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], [[ANYEXT]]
+    ; RV64ZBB-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 65536
+    ; RV64ZBB-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], [[C]]
     ; RV64ZBB-NEXT: [[CTZW:%[0-9]+]]:_(s64) = G_CTZW [[OR]]
     ; RV64ZBB-NEXT: $x10 = COPY [[CTZW]](s64)
     ; RV64ZBB-NEXT: PseudoRET implicit $x10
@@ -420,44 +348,34 @@ body:             |
     ; RV64I: liveins: $x10
     ; RV64I-NEXT: {{  $}}
     ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY]], [[ANYEXT]]
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[ANYEXT]]
+    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY]], [[C]]
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[C]]
     ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[ADD]]
-    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
     ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C2]]
-    ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C1]](s32)
-    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[ZEXT]](s64)
-    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1431655765
-    ; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
-    ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[ANYEXT1]]
+    ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[C1]](s64)
+    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1431655765
+    ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[C3]]
     ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[AND]], [[AND2]]
-    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+    ; RV64I-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
     ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C2]]
-    ; RV64I-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[C4]](s32)
-    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[ZEXT1]](s64)
-    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 858993459
-    ; RV64I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
-    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[ANYEXT2]]
-    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[ANYEXT2]]
+    ; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C4]](s64)
+    ; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 858993459
+    ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[C5]]
+    ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C5]]
     ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[AND4]], [[AND5]]
-    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C2]]
-    ; RV64I-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[C6]](s32)
-    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND6]], [[ZEXT2]](s64)
+    ; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[ADD1]], [[C6]](s64)
     ; RV64I-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[LSHR2]], [[ADD1]]
-    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
-    ; RV64I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
-    ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[ADD2]], [[ANYEXT3]]
-    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
-    ; RV64I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
-    ; RV64I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C8]](s32)
-    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND7]], [[ANYEXT4]]
-    ; RV64I-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C2]]
-    ; RV64I-NEXT: [[ZEXT3:%[0-9]+]]:_(s64) = G_ZEXT [[C9]](s32)
-    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND8]], [[ZEXT3]](s64)
+    ; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 252645135
+    ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[ADD2]], [[C7]]
+    ; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 16843009
+    ; RV64I-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
+    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND6]], [[C8]]
+    ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[MUL]], [[C2]]
+    ; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[AND7]], [[C9]](s64)
     ; RV64I-NEXT: $x10 = COPY [[LSHR3]](s64)
     ; RV64I-NEXT: PseudoRET implicit $x10
     ;

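The cttz hunks above are all instances of one mechanical rewrite: with s32 G_CONSTANT no longer legal on RV64, every mask and shift-amount constant is clamped to sXLen at creation, so the G_ANYEXT/G_ZEXT that used to widen it for a 64-bit user vanishes. A minimal before/after sketch of that shape (hypothetical value names, not copied from any one test):

    ; Before: constants are built at s32 and widened at each 64-bit use.
    %x:_(s64) = COPY $x10
    %c:_(s32) = G_CONSTANT i32 85
    %wide:_(s64) = G_ANYEXT %c(s32)
    %masked:_(s64) = G_AND %x, %wide

    ; After: the same constant is materialized directly at s64 (sXLen),
    ; so the widening instruction disappears.
    %y:_(s64) = COPY $x10
    %c64:_(s64) = G_CONSTANT i64 85
    %masked64:_(s64) = G_AND %y, %c64
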
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-fshl-fshr-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-fshl-fshr-rv64.mir
index 0402ccba900741..012342b127608c 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-fshl-fshr-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-fshl-fshr-rv64.mir
@@ -14,23 +14,17 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY2]], [[ANYEXT]]
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY2]], [[ANYEXT1]]
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[ANYEXT]]
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 7
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY2]], [[C]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY2]], [[C1]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[C]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND]](s64)
     ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
-    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C3]]
-    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND2]](s64)
-    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[ANYEXT2]], [[C3]]
-    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C3]]
-    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND4]], [[AND3]](s64)
-    ; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[AND1]], [[C3]]
-    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[LSHR]], [[AND5]](s64)
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C3]]
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[C2]](s64)
+    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[LSHR]], [[AND1]](s64)
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL]], [[LSHR1]]
     ; CHECK-NEXT: $x10 = COPY [[OR]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
@@ -58,23 +52,17 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY2]], [[ANYEXT]]
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY2]], [[ANYEXT1]]
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[ANYEXT]]
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY2]], [[C]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY2]], [[C1]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[C]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND]](s64)
     ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535
-    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C3]]
-    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND2]](s64)
-    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[ANYEXT2]], [[C3]]
-    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C3]]
-    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND4]], [[AND3]](s64)
-    ; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[AND1]], [[C3]]
-    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[LSHR]], [[AND5]](s64)
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C3]]
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[C2]](s64)
+    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[LSHR]], [[AND1]](s64)
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL]], [[LSHR1]]
     ; CHECK-NEXT: $x10 = COPY [[OR]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
@@ -102,19 +90,16 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY2]], [[ANYEXT]]
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY2]], [[ANYEXT1]]
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[ANYEXT]]
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY2]], [[C]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY2]], [[C1]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[C]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; CHECK-NEXT: [[SLLW:%[0-9]+]]:_(s64) = G_SLLW [[COPY]], [[AND]]
     ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
     ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C3]]
-    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C2]](s32)
-    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[ZEXT]](s64)
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[C2]](s64)
     ; CHECK-NEXT: [[SRLW:%[0-9]+]]:_(s64) = G_SRLW [[LSHR]], [[AND1]]
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SLLW]], [[SRLW]]
     ; CHECK-NEXT: $x10 = COPY [[OR]](s64)
@@ -175,23 +160,17 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY2]], [[ANYEXT]]
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY2]], [[ANYEXT1]]
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[ANYEXT]]
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 7
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY2]], [[C]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY2]], [[C1]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[C]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C2]](s64)
+    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[SHL]], [[AND1]](s64)
     ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
-    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[ANYEXT2]], [[C3]]
-    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND2]](s64)
-    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[AND1]], [[C3]]
-    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[SHL]], [[AND3]](s64)
-    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C3]]
-    ; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C3]]
-    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND5]], [[AND4]](s64)
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C3]]
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[AND]](s64)
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
     ; CHECK-NEXT: $x10 = COPY [[OR]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
@@ -219,23 +198,17 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY2]], [[ANYEXT]]
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY2]], [[ANYEXT1]]
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[ANYEXT]]
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY2]], [[C]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY2]], [[C1]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[C]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C2]](s64)
+    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[SHL]], [[AND1]](s64)
     ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535
-    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[ANYEXT2]], [[C3]]
-    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND2]](s64)
-    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[AND1]], [[C3]]
-    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[SHL]], [[AND3]](s64)
-    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C3]]
-    ; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C3]]
-    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND5]], [[AND4]](s64)
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C3]]
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[AND]](s64)
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
     ; CHECK-NEXT: $x10 = COPY [[OR]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
@@ -263,16 +236,13 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY2]], [[ANYEXT]]
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY2]], [[ANYEXT1]]
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[ANYEXT]]
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C2]](s32)
-    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[ZEXT]](s64)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY2]], [[C]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY2]], [[C1]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[C]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C2]](s64)
     ; CHECK-NEXT: [[SLLW:%[0-9]+]]:_(s64) = G_SLLW [[SHL]], [[AND1]]
     ; CHECK-NEXT: [[SRLW:%[0-9]+]]:_(s64) = G_SRLW [[COPY1]], [[AND]]
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SLLW]], [[SRLW]]

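In the funnel-shift hunks the payoff goes beyond dropping the extension: once the shift amount is a visible G_CONSTANT i64, the legalizer can see it is in range for the narrow element type, so the G_AND that clamped the opaque widened amount to 255/65535 is dropped as well; only the value operand still needs masking. Roughly (a sketch with hypothetical names, modeled on the s8 case above):

    ; Before: the widened shift amount is opaque, so it is clamped to the
    ; narrow type's mask before the shift.
    %val:_(s64) = COPY $x11
    %mask:_(s64) = G_CONSTANT i64 255
    %one:_(s32) = G_CONSTANT i32 1
    %wide:_(s64) = G_ZEXT %one(s32)
    %amt:_(s64) = G_AND %wide, %mask
    %narrow:_(s64) = G_AND %val, %mask
    %shifted:_(s64) = G_LSHR %narrow, %amt(s64)

    ; After: i64 1 is visibly in range for an s8 shift, so no clamp of
    ; the amount is needed.
    %val2:_(s64) = COPY $x11
    %mask2:_(s64) = G_CONSTANT i64 255
    %one64:_(s64) = G_CONSTANT i64 1
    %narrow2:_(s64) = G_AND %val2, %mask2
    %shifted2:_(s64) = G_LSHR %narrow2, %one64(s64)
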
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv64.mir
index f749648ac51bda..43a9c4bb40bd50 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-load-rv64.mir
@@ -275,10 +275,9 @@ body:             |
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
     ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
     ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
-    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C1]](s32)
-    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[ZEXT]](s64)
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C1]](s64)
     ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[ZEXTLOAD]](s32)
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL]], [[ANYEXT1]]
     ; CHECK-NEXT: $x10 = COPY [[OR]](s64)
@@ -324,10 +323,9 @@ body:             |
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
     ; CHECK-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 1)
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
     ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ZEXTLOAD1]](s32)
-    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C1]](s32)
-    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[ZEXT]](s64)
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C1]](s64)
     ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[ZEXTLOAD]](s32)
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL]], [[ANYEXT1]]
     ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
@@ -335,13 +333,13 @@ body:             |
     ; CHECK-NEXT: [[ZEXTLOAD2:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 2)
     ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
     ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 3)
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
     ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
-    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT2]], [[ZEXT]](s64)
+    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT2]], [[C3]](s64)
     ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[ZEXTLOAD2]](s32)
     ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[ANYEXT3]]
-    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; CHECK-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[C3]](s32)
-    ; CHECK-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[OR1]], [[ZEXT1]](s64)
+    ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[OR1]], [[C4]](s64)
     ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[SHL2]], [[OR]]
     ; CHECK-NEXT: $x10 = COPY [[OR2]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
@@ -386,10 +384,9 @@ body:             |
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
     ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
     ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2)
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
-    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C1]](s32)
-    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[ZEXT]](s64)
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C1]](s64)
     ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[ZEXTLOAD]](s32)
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL]], [[ANYEXT1]]
     ; CHECK-NEXT: $x10 = COPY [[OR]](s64)

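One subtlety in the load hunks: the old output CSE'd a single widened shift amount ([[ZEXT]]) across both byte-merging shifts, while the new output materializes G_CONSTANT i64 8 twice ([[C1]] and [[C3]]). That duplication is presumably benign — G_CONSTANT is trivially rematerializable and later CSE or isel can merge the copies. Schematically (hypothetical names):

    ; Before: one widened amount was reused for both byte merges.
    %lo0:_(s64) = COPY $x10
    %lo1:_(s64) = COPY $x11
    %c8:_(s32) = G_CONSTANT i32 8
    %z:_(s64) = G_ZEXT %c8(s32)
    %s0:_(s64) = G_SHL %lo0, %z(s64)
    %s1:_(s64) = G_SHL %lo1, %z(s64)

    ; After: each merge gets its own i64 8.
    %hi0:_(s64) = COPY $x10
    %hi1:_(s64) = COPY $x11
    %a:_(s64) = G_CONSTANT i64 8
    %t0:_(s64) = G_SHL %hi0, %a(s64)
    %b:_(s64) = G_CONSTANT i64 8
    %t1:_(s64) = G_SHL %hi1, %b(s64)
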
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rotate-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rotate-rv64.mir
index cc21d58d968d27..4689a7dd219abf 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rotate-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rotate-rv64.mir
@@ -17,19 +17,16 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[ANYEXT]], [[COPY1]]
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 7
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY1]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C1]]
     ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
     ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C2]]
     ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND1]](s64)
-    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[ANYEXT1]]
-    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[AND2]], [[C2]]
-    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C2]]
-    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND4]], [[AND3]](s64)
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C2]]
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[AND2]](s64)
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL]], [[LSHR]]
     ; CHECK-NEXT: $x10 = COPY [[OR]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
@@ -54,19 +51,16 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[ANYEXT]], [[COPY1]]
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY1]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C1]]
     ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535
     ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C2]]
     ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND1]](s64)
-    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[ANYEXT1]]
-    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[AND2]], [[C2]]
-    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C2]]
-    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND4]], [[AND3]](s64)
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C2]]
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[AND2]](s64)
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL]], [[LSHR]]
     ; CHECK-NEXT: $x10 = COPY [[OR]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
@@ -91,14 +85,12 @@ body:             |
     ; RV64I-NEXT: {{  $}}
     ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[ANYEXT]], [[COPY1]]
-    ; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[ANYEXT1]]
+    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY1]]
+    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C1]]
     ; RV64I-NEXT: [[SLLW:%[0-9]+]]:_(s64) = G_SLLW [[COPY]], [[AND]]
-    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[ANYEXT1]]
+    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
     ; RV64I-NEXT: [[SRLW:%[0-9]+]]:_(s64) = G_SRLW [[COPY]], [[AND1]]
     ; RV64I-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SLLW]], [[SRLW]]
     ; RV64I-NEXT: $x10 = COPY [[OR]](s64)
@@ -170,19 +162,16 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[ANYEXT]], [[COPY1]]
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 7
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY1]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C1]]
     ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
     ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C2]]
     ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C2]]
     ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[AND1]](s64)
-    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[ANYEXT1]]
-    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[AND3]], [[C2]]
-    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND4]](s64)
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND3]](s64)
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
     ; CHECK-NEXT: $x10 = COPY [[OR]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
@@ -207,19 +196,16 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[ANYEXT]], [[COPY1]]
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY1]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C1]]
     ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535
     ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C2]]
     ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C2]]
     ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[AND1]](s64)
-    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[ANYEXT1]]
-    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[AND3]], [[C2]]
-    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND4]](s64)
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND3]](s64)
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
     ; CHECK-NEXT: $x10 = COPY [[OR]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
@@ -244,14 +230,12 @@ body:             |
     ; RV64I-NEXT: {{  $}}
     ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
-    ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[ANYEXT]], [[COPY1]]
-    ; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[ANYEXT1]]
+    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY1]]
+    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C1]]
     ; RV64I-NEXT: [[SRLW:%[0-9]+]]:_(s64) = G_SRLW [[COPY]], [[AND]]
-    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[ANYEXT1]]
+    ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
     ; RV64I-NEXT: [[SLLW:%[0-9]+]]:_(s64) = G_SLLW [[COPY]], [[AND1]]
     ; RV64I-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SRLW]], [[SLLW]]
     ; RV64I-NEXT: $x10 = COPY [[OR]](s64)

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sat-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sat-rv64.mir
index 6985ad5511f67b..54a3cb63545d58 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sat-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sat-rv64.mir
@@ -22,8 +22,9 @@ body:             |
     ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[AND]](s64), [[AND1]]
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s64), [[C1]], [[COPY2]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[C1]](s64)
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s64), [[TRUNC1]], [[COPY2]]
     ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[SELECT]](s32)
     ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
@@ -92,17 +93,15 @@ body:             |
     ; CHECK-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD]], 32
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ne), [[ADD]](s64), [[SEXT_INREG2]]
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[SEXT_INREG2]](s64)
-    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
-    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY3]], [[ZEXT]](s64)
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[ANYEXT]]
+    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY3]], [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -2147483648
+    ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C1]]
     ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ADD1]](s64)
     ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s64), [[TRUNC1]], [[COPY2]]
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[SELECT]](s32)
-    ; CHECK-NEXT: $x10 = COPY [[ANYEXT1]](s64)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[SELECT]](s32)
+    ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
     %2:_(s64) = COPY $x10
     %0:_(s32) = G_TRUNC %2(s64)
@@ -180,8 +179,9 @@ body:             |
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
     ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[AND]](s64), [[AND1]]
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s64), [[C1]], [[TRUNC]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[C1]](s64)
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s64), [[TRUNC1]], [[TRUNC]]
     ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[SELECT]](s32)
     ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
@@ -247,17 +247,15 @@ body:             |
     ; CHECK-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s64) = G_SEXT_INREG [[SUB]], 32
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ne), [[SUB]](s64), [[SEXT_INREG2]]
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[SEXT_INREG2]](s64)
-    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
-    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY3]], [[ZEXT]](s64)
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[ANYEXT]]
+    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY3]], [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -2147483648
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C1]]
     ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ADD]](s64)
     ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s64), [[TRUNC1]], [[COPY2]]
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[SELECT]](s32)
-    ; CHECK-NEXT: $x10 = COPY [[ANYEXT1]](s64)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[SELECT]](s32)
+    ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10
     %2:_(s64) = COPY $x10
     %0:_(s32) = G_TRUNC %2(s64)
@@ -334,9 +332,10 @@ body:             |
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C]]
     ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[AND]](s64), [[AND1]]
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ADD]](s64)
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s64), [[C1]], [[TRUNC]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[C1]](s64)
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ADD]](s64)
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s64), [[TRUNC]], [[TRUNC1]]
     ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[SELECT]](s32)
     ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10

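The saturating-arithmetic tests above also show the one place the promotion adds an instruction in the legalized MIR: when the constant feeds an operation that stays at s32, such as G_SELECT, a G_TRUNC now brings the s64 constant back down. A minimal sketch, again with illustrative register names:

    ; Before: the s32 constant fed the select directly.
    %c:_(s32) = G_CONSTANT i32 -1
    %s:_(s32) = G_SELECT %cond(s64), %c, %x

    ; After: the s64 constant is truncated back to s32 first.
    %c:_(s64) = G_CONSTANT i64 -1
    %t:_(s32) = G_TRUNC %c(s64)
    %s:_(s32) = G_SELECT %cond(s64), %t, %x
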
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv64.mir
index 9fde63784a9e01..f441c614d88670 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-store-rv64.mir
@@ -261,12 +261,10 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
     ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[AND]](s64)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C]](s64)
     ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
     ; CHECK-NEXT: G_STORE [[TRUNC]](s32), [[COPY1]](p0) :: (store (s8))
@@ -313,30 +311,25 @@ body:             |
     ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
-    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[ZEXT]](s64)
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C]](s64)
     ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
     ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
     ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
-    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
     ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C4]]
-    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C4]]
-    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[AND1]](s64)
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C4]]
+    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[C3]](s64)
     ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C5]](s64)
     ; CHECK-NEXT: G_STORE [[COPY2]](s32), [[COPY1]](p0) :: (store (s8))
     ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR1]](s64)
     ; CHECK-NEXT: G_STORE [[TRUNC2]](s32), [[PTR_ADD1]](p0) :: (store (s8) into unknown-address + 1)
-    ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C6]](s32)
-    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[ANYEXT1]], [[C4]]
-    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[C4]]
-    ; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND4]], [[AND3]](s64)
+    ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[C4]]
+    ; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[C6]](s64)
     ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C5]](s64)
     ; CHECK-NEXT: G_STORE [[TRUNC1]](s32), [[PTR_ADD]](p0) :: (store (s8) into unknown-address + 2)
     ; CHECK-NEXT: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR2]](s64)
@@ -382,11 +375,10 @@ body:             |
     ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
-    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[ZEXT]](s64)
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C]](s64)
     ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
     ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
     ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
@@ -436,18 +428,18 @@ body:             |
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
     ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
     ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY2]], [[C3]]
-    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C2]](s32)
-    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[ZEXT]](s64)
+    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C2]](s64)
     ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR1]](s64)
     ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
     ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C4]](s64)
     ; CHECK-NEXT: G_STORE [[TRUNC]](s32), [[COPY1]](p0) :: (store (s16))
     ; CHECK-NEXT: G_STORE [[TRUNC1]](s32), [[PTR_ADD1]](p0) :: (store (s16) into unknown-address + 2)
     ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64)
-    ; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[LSHR]], [[ZEXT]](s64)
+    ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[LSHR]], [[C5]](s64)
     ; CHECK-NEXT: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR2]](s64)
     ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C4]](s64)
     ; CHECK-NEXT: G_STORE [[TRUNC2]](s32), [[PTR_ADD]](p0) :: (store (s16) into unknown-address + 4)

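The RVV tests that follow show the same promotion in splat-vector legalization: the s32 constants that were anyextended before feeding G_SPLAT_VECTOR are now emitted at s64 and splatted directly. A minimal sketch with illustrative register names and an arbitrary element type:

    ; Before: s32 constant widened before the splat.
    %c:_(s32) = G_CONSTANT i32 1
    %w:_(s64) = G_ANYEXT %c(s32)
    %v:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR %w(s64)

    ; After: the splat takes the s64 constant directly.
    %c:_(s64) = G_CONSTANT i64 1
    %v:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR %c(s64)
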
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-anyext.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-anyext.mir
index c74a999354b608..d5a81d5022c4d6 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-anyext.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-anyext.mir
@@ -26,12 +26,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -63,12 +61,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -100,12 +96,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -174,12 +168,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -211,12 +203,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -248,12 +238,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -322,12 +310,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -359,12 +345,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -396,12 +380,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 4 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
@@ -470,12 +452,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 8 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -507,12 +487,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 8 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
@@ -544,12 +522,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 8 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
@@ -618,12 +594,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 16 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
@@ -655,12 +629,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 16 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
@@ -692,12 +664,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 16 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8m8
@@ -729,12 +699,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SELECT [[COPY]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 32 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
@@ -766,12 +734,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SELECT [[COPY]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 32 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m8
@@ -803,12 +769,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[COPY]](<vscale x 64 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 64 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8m8

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-extract-subvector.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-extract-subvector.mir
index ca03482eee0d1d..dcee71432f4c3d 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-extract-subvector.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-extract-subvector.mir
@@ -32,12 +32,10 @@ body:             |
     ;
     ; RV64-LABEL: name: extract_subvector_nxv2i1_nxv4i1
     ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
     ; RV64-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
@@ -47,9 +45,8 @@ body:             |
     ; RV64-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
     ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 4 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[SELECT]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 2 x s1>), [[C3]], 3
     ; RV64-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 4 x s8>), 0
-    ; RV64-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
+    ; RV64-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C4]](s64)
     ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(ne), [[EXTRACT_SUBVECTOR]](<vscale x 2 x s8>), [[SPLAT_VECTOR2]]
     ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -87,12 +84,10 @@ body:             |
     ;
     ; RV64-LABEL: name: extract_subvector_nxv2i1_nxv8i1
     ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
     ; RV64-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
@@ -102,9 +97,8 @@ body:             |
     ; RV64-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
     ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 8 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[SELECT]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 2 x s1>), [[C3]], 3
     ; RV64-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 8 x s8>), 0
-    ; RV64-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
+    ; RV64-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C4]](s64)
     ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(ne), [[EXTRACT_SUBVECTOR]](<vscale x 2 x s8>), [[SPLAT_VECTOR2]]
     ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -135,17 +129,14 @@ body:             |
     ;
     ; RV64-LABEL: name: extract_subvector_nxv4i1_nxv64i1
     ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_EXTRACT_SUBVECTOR [[SELECT]](<vscale x 64 x s8>), 16
-    ; RV64-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
+    ; RV64-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C2]](s64)
     ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(ne), [[EXTRACT_SUBVECTOR]](<vscale x 4 x s8>), [[SPLAT_VECTOR2]]
     ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
     ; RV64-NEXT: PseudoRET implicit $v8

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-icmp.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-icmp.mir
index fd92f123e886e4..9a92f68a49ad94 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-icmp.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-icmp.mir
@@ -21,12 +21,10 @@ body:             |
     ;
     ; RV64-LABEL: name: icmp_nxv1i1
     ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 1 x s8>), [[SELECT]]
     ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
@@ -55,12 +53,10 @@ body:             |
     ;
     ; RV64-LABEL: name: icmp_nxv2i1
     ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 2 x s8>), [[SELECT]]
     ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
@@ -89,12 +85,10 @@ body:             |
     ;
     ; RV64-LABEL: name: icmp_nxv4i1
     ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 4 x s8>), [[SELECT]]
     ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
@@ -123,12 +117,10 @@ body:             |
     ;
     ; RV64-LABEL: name: icmp_nxv8i1
     ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 8 x s8>), [[SELECT]]
     ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
@@ -157,12 +149,10 @@ body:             |
     ;
     ; RV64-LABEL: name: icmp_nxv16i1
     ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 16 x s8>), [[SELECT]]
     ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
@@ -191,12 +181,10 @@ body:             |
     ;
     ; RV64-LABEL: name: icmp_nxv32i1
     ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 32 x s8>), [[SELECT]]
     ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
@@ -225,12 +213,10 @@ body:             |
     ;
     ; RV64-LABEL: name: icmp_nxv64i1
     ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 64 x s1>) = G_ICMP intpred(sgt), [[SELECT]](<vscale x 64 x s8>), [[SELECT]]
     ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 64 x s1>)

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-insert-subvector.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-insert-subvector.mir
index 81a3a0c7ddd03a..e8b5325bf8ef1b 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-insert-subvector.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-insert-subvector.mir
@@ -57,19 +57,15 @@ body:             |
     ; RV32-NEXT: {{  $}}
     ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v8
     ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
-    ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV32-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV32-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
-    ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV32-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
-    ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV32-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
-    ; RV32-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT3]](s64)
+    ; RV32-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C2]](s64)
+    ; RV32-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV32-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C3]](s64)
     ; RV32-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
     ; RV32-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
     ; RV32-NEXT: [[INSERT_SUBVECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_SUBVECTOR [[DEF1]], [[SELECT1]](<vscale x 2 x s8>), 0
@@ -82,9 +78,8 @@ body:             |
     ; RV32-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB1]], [[C5]](s64)
     ; RV32-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[LSHR1]], [[LSHR]]
     ; RV32-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 4 x s8>) = G_VSLIDEUP_VL [[SELECT]], [[INSERT_SUBVECTOR]], [[LSHR1]](s64), [[VMSET_VL]](<vscale x 4 x s1>), [[ADD]](s64), 1
-    ; RV32-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV32-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C6]](s32)
-    ; RV32-NEXT: [[SPLAT_VECTOR4:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT4]](s64)
+    ; RV32-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV32-NEXT: [[SPLAT_VECTOR4:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C6]](s64)
     ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(ne), [[VSLIDEUP_VL]](<vscale x 4 x s8>), [[SPLAT_VECTOR4]]
     ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
     ; RV32-NEXT: PseudoRET implicit $v8
@@ -138,19 +133,15 @@ body:             |
     ; RV32-NEXT: {{  $}}
     ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v8
     ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
-    ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV32-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV32-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV32-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
-    ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV32-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
-    ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV32-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
-    ; RV32-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT3]](s64)
+    ; RV32-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C2]](s64)
+    ; RV32-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV32-NEXT: [[SPLAT_VECTOR3:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C3]](s64)
     ; RV32-NEXT: [[SELECT1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[SPLAT_VECTOR3]], [[SPLAT_VECTOR2]]
     ; RV32-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
     ; RV32-NEXT: [[INSERT_SUBVECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_SUBVECTOR [[DEF1]], [[SELECT1]](<vscale x 2 x s8>), 0
@@ -164,9 +155,8 @@ body:             |
     ; RV32-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB1]], [[C6]](s64)
     ; RV32-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[LSHR1]], [[LSHR]]
     ; RV32-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 8 x s8>) = G_VSLIDEUP_VL [[SELECT]], [[INSERT_SUBVECTOR]], [[LSHR1]](s64), [[VMSET_VL]](<vscale x 8 x s1>), [[ADD]](s64), 0
-    ; RV32-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV32-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
-    ; RV32-NEXT: [[SPLAT_VECTOR4:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT4]](s64)
+    ; RV32-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV32-NEXT: [[SPLAT_VECTOR4:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C7]](s64)
     ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(ne), [[VSLIDEUP_VL]](<vscale x 8 x s8>), [[SPLAT_VECTOR4]]
     ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
     ; RV32-NEXT: PseudoRET implicit $v8

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sext.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sext.mir
index c7c8d347adfc9a..daf0afa3c7737d 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sext.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sext.mir
@@ -26,12 +26,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -63,12 +61,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -100,12 +96,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -174,12 +168,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -211,12 +203,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -248,12 +238,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -322,12 +310,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -359,12 +345,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -396,12 +380,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 4 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
@@ -470,12 +452,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 8 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -507,12 +487,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 8 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
@@ -544,12 +522,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 8 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
@@ -618,12 +594,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 16 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
@@ -655,12 +629,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 16 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
@@ -692,12 +664,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 16 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8m8
@@ -729,12 +699,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SELECT [[COPY]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 32 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
@@ -766,12 +734,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SELECT [[COPY]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 32 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m8
@@ -803,12 +769,10 @@ body:             |
     ; RV64: liveins: $v0
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v0
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[COPY]](<vscale x 64 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 64 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8m8

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv64.mir
index 126b6c9497e94c..8ccc23126eb81f 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv64.mir
@@ -49,15 +49,12 @@ body:             |
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[ANYEXT]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C]]
     ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[AND1]](s64)
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 1 x s8>), [[SPLAT_VECTOR1]]
     ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 1 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
@@ -115,15 +112,12 @@ body:             |
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[ANYEXT]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C]]
     ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[AND1]](s64)
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 2 x s8>), [[SPLAT_VECTOR1]]
     ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 2 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
@@ -181,15 +175,12 @@ body:             |
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[ANYEXT]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C]]
     ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[AND1]](s64)
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 4 x s8>), [[SPLAT_VECTOR1]]
     ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 4 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
@@ -247,15 +238,12 @@ body:             |
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[ANYEXT]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C]]
     ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[AND1]](s64)
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 8 x s8>), [[SPLAT_VECTOR1]]
     ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 8 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
@@ -313,15 +301,12 @@ body:             |
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[ANYEXT]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C]]
     ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[AND1]](s64)
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 16 x s8>), [[SPLAT_VECTOR1]]
     ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 16 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
@@ -379,15 +364,12 @@ body:             |
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[ANYEXT]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C]]
     ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[AND1]](s64)
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 32 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 32 x s8>), [[SPLAT_VECTOR1]]
     ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 32 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
@@ -445,15 +427,12 @@ body:             |
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C1]]
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[ANYEXT]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C]]
     ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[AND1]](s64)
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
-    ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 64 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 64 x s8>), [[SPLAT_VECTOR1]]
     ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 64 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
@@ -471,9 +450,8 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv1i8
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C]](s64)
     ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>)
     ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
@@ -491,9 +469,8 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv2i8
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C]](s64)
     ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>)
     ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
@@ -510,9 +487,8 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv4i8
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s64)
     ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>)
     ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
@@ -529,9 +505,8 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv8i8
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s64)
     ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>)
     ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
@@ -548,9 +523,8 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv16i8
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C]](s64)
     ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>)
     ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
@@ -567,9 +541,8 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv1i16
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s64)
     ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
     ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
@@ -586,9 +559,8 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv2i16
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s64)
     ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
     ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
@@ -605,9 +577,8 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv4i16
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s64)
     ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
     ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
@@ -624,9 +595,8 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv8i16
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s64)
     ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
     ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
@@ -643,9 +613,8 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv16i16
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s64)
     ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
     ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
@@ -662,9 +631,8 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv1i32
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s64)
     ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
     ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
@@ -681,9 +649,8 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv2i32
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s64)
     ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
     ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
@@ -700,9 +667,8 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv4i32
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s64)
     ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
     ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
@@ -719,9 +685,8 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv8i32
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s64)
     ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
     ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
@@ -738,9 +703,8 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv16i32
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s64)
     ; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
     ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-zext.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-zext.mir
index 2a6e7a6fa8e524..34c795dc8c624d 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-zext.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-zext.mir
@@ -26,12 +26,10 @@ body:             |
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -63,12 +61,10 @@ body:             |
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -100,12 +96,10 @@ body:             |
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SELECT [[COPY]](<vscale x 1 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -174,12 +168,10 @@ body:             |
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -211,12 +203,10 @@ body:             |
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -248,12 +238,10 @@ body:             |
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SELECT [[COPY]](<vscale x 2 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -322,12 +310,10 @@ body:             |
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v8
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -359,12 +345,10 @@ body:             |
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v8
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -396,12 +380,10 @@ body:             |
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v8
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SELECT [[COPY]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 4 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
@@ -470,12 +452,10 @@ body:             |
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v8
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8 = COPY [[SELECT]](<vscale x 8 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -507,12 +487,10 @@ body:             |
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v8
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 8 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
@@ -544,12 +522,10 @@ body:             |
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v8
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SELECT [[COPY]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 8 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
@@ -618,12 +594,10 @@ body:             |
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v8
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 16 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8m2
@@ -655,12 +629,10 @@ body:             |
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v8
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 16 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
@@ -692,12 +664,10 @@ body:             |
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v8
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SELECT [[COPY]](<vscale x 16 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 16 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8m8
@@ -729,12 +699,10 @@ body:             |
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v8
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SELECT [[COPY]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 32 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8m4
@@ -766,12 +734,10 @@ body:             |
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v8
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SELECT [[COPY]](<vscale x 32 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 32 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8m8
@@ -803,12 +769,10 @@ body:             |
     ; RV64: liveins: $v8
     ; RV64-NEXT: {{  $}}
     ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v8
-    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
-    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C]](s64)
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C1]](s64)
     ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[COPY]](<vscale x 64 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
     ; RV64-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 64 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8m8

diff  --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll
index d2694098e1646e..5beebe5bbcd973 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll
@@ -6,17 +6,17 @@
 ; RUN: llc -mtriple=riscv64 -global-isel -mattr=+zbkb -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64ZBB-ZBKB,RV64ZBKB
 
-; FIXME: sext.w is unneeded.
-; FIXME: Use andn
 define signext i32 @andn_i32(i32 signext %a, i32 signext %b) nounwind {
-; CHECK-LABEL: andn_i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, -1
-; CHECK-NEXT:    srli a2, a2, 32
-; CHECK-NEXT:    xor a1, a1, a2
-; CHECK-NEXT:    and a0, a1, a0
-; CHECK-NEXT:    sext.w a0, a0
-; CHECK-NEXT:    ret
+; RV64I-LABEL: andn_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    not a1, a1
+; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-ZBKB-LABEL: andn_i32:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    andn a0, a0, a1
+; RV64ZBB-ZBKB-NEXT:    ret
   %neg = xor i32 %b, -1
   %and = and i32 %neg, %a
   ret i32 %and
@@ -38,17 +38,17 @@ define i64 @andn_i64(i64 %a, i64 %b) nounwind {
   ret i64 %and
 }
 
-; FIXME: sext.w is unneeded.
-; FIXME: Use orn
 define signext i32 @orn_i32(i32 signext %a, i32 signext %b) nounwind {
-; CHECK-LABEL: orn_i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, -1
-; CHECK-NEXT:    srli a2, a2, 32
-; CHECK-NEXT:    xor a1, a1, a2
-; CHECK-NEXT:    or a0, a1, a0
-; CHECK-NEXT:    sext.w a0, a0
-; CHECK-NEXT:    ret
+; RV64I-LABEL: orn_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    not a1, a1
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-ZBKB-LABEL: orn_i32:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    orn a0, a0, a1
+; RV64ZBB-ZBKB-NEXT:    ret
   %neg = xor i32 %b, -1
   %or = or i32 %neg, %a
   ret i32 %or
@@ -70,17 +70,17 @@ define i64 @orn_i64(i64 %a, i64 %b) nounwind {
   ret i64 %or
 }
 
-; FIXME: sext.w is unneeded.
-; FIXME: Use xnor
 define signext i32 @xnor_i32(i32 signext %a, i32 signext %b) nounwind {
-; CHECK-LABEL: xnor_i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, -1
-; CHECK-NEXT:    srli a2, a2, 32
-; CHECK-NEXT:    xor a0, a0, a1
-; CHECK-NEXT:    xor a0, a0, a2
-; CHECK-NEXT:    sext.w a0, a0
-; CHECK-NEXT:    ret
+; RV64I-LABEL: xnor_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-ZBKB-LABEL: xnor_i32:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    xnor a0, a0, a1
+; RV64ZBB-ZBKB-NEXT:    ret
   %neg = xor i32 %a, -1
   %xor = xor i32 %neg, %b
   ret i32 %xor
@@ -146,26 +146,21 @@ define void @rol_i32_nosext(i32 signext %a, i32 signext %b, ptr %x) nounwind {
   ret void
 }
 
-; FIXME: Bad materialization of -2 as 0xfffffffe.
 define signext i32 @rol_i32_neg_constant_rhs(i32 signext %a) nounwind {
 ; RV64I-LABEL: rol_i32_neg_constant_rhs:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    neg a1, a0
+; RV64I-NEXT:    li a1, -2
+; RV64I-NEXT:    neg a2, a0
 ; RV64I-NEXT:    andi a0, a0, 31
-; RV64I-NEXT:    li a2, 1
-; RV64I-NEXT:    slli a2, a2, 32
-; RV64I-NEXT:    addi a2, a2, -2
-; RV64I-NEXT:    sllw a0, a2, a0
-; RV64I-NEXT:    andi a1, a1, 31
-; RV64I-NEXT:    srlw a1, a2, a1
+; RV64I-NEXT:    sllw a0, a1, a0
+; RV64I-NEXT:    andi a2, a2, 31
+; RV64I-NEXT:    srlw a1, a1, a2
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: rol_i32_neg_constant_rhs:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    li a1, 1
-; RV64ZBB-ZBKB-NEXT:    slli a1, a1, 32
-; RV64ZBB-ZBKB-NEXT:    addi a1, a1, -2
+; RV64ZBB-ZBKB-NEXT:    li a1, -2
 ; RV64ZBB-ZBKB-NEXT:    rolw a0, a1, a0
 ; RV64ZBB-ZBKB-NEXT:    ret
   %1 = tail call i32 @llvm.fshl.i32(i32 -2, i32 -2, i32 %a)
@@ -235,26 +230,21 @@ define void @ror_i32_nosext(i32 signext %a, i32 signext %b, ptr %x) nounwind {
   ret void
 }
 
-; FIXME: Bad materialization of -2 as 0xfffffffe.
 define signext i32 @ror_i32_neg_constant_rhs(i32 signext %a) nounwind {
 ; RV64I-LABEL: ror_i32_neg_constant_rhs:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    neg a1, a0
+; RV64I-NEXT:    li a1, -2
+; RV64I-NEXT:    neg a2, a0
 ; RV64I-NEXT:    andi a0, a0, 31
-; RV64I-NEXT:    li a2, 1
-; RV64I-NEXT:    slli a2, a2, 32
-; RV64I-NEXT:    addi a2, a2, -2
-; RV64I-NEXT:    srlw a0, a2, a0
-; RV64I-NEXT:    andi a1, a1, 31
-; RV64I-NEXT:    sllw a1, a2, a1
+; RV64I-NEXT:    srlw a0, a1, a0
+; RV64I-NEXT:    andi a2, a2, 31
+; RV64I-NEXT:    sllw a1, a1, a2
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: ror_i32_neg_constant_rhs:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    li a1, 1
-; RV64ZBB-ZBKB-NEXT:    slli a1, a1, 32
-; RV64ZBB-ZBKB-NEXT:    addi a1, a1, -2
+; RV64ZBB-ZBKB-NEXT:    li a1, -2
 ; RV64ZBB-ZBKB-NEXT:    rorw a0, a1, a0
 ; RV64ZBB-ZBKB-NEXT:    ret
   %1 = tail call i32 @llvm.fshr.i32(i32 -2, i32 -2, i32 %a)

diff  --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll
index b55f8bd87b16a7..03f8eff90c23bd 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll
@@ -66,12 +66,14 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
 define signext i32 @log2_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: log2_i32:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a1, a0, 32
 ; RV64I-NEXT:    srli a1, a1, 32
+; RV64I-NEXT:    li s0, 31
 ; RV64I-NEXT:    beqz a1, .LBB1_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    srliw a1, a0, 1
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    srliw a1, a0, 2
@@ -104,20 +106,20 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    li a1, 32
 ; RV64I-NEXT:    sub a0, a1, a0
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    j .LBB1_3
 ; RV64I-NEXT:  .LBB1_2:
 ; RV64I-NEXT:    li a0, 32
 ; RV64I-NEXT:  .LBB1_3: # %cond.end
-; RV64I-NEXT:    li a1, 31
-; RV64I-NEXT:    subw a0, a1, a0
+; RV64I-NEXT:    subw a0, s0, a0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: log2_i32:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    clzw a0, a0
 ; RV64ZBB-NEXT:    li a1, 31
+; RV64ZBB-NEXT:    clzw a0, a0
 ; RV64ZBB-NEXT:    subw a0, a1, a0
 ; RV64ZBB-NEXT:    ret
   %1 = call i32 @llvm.ctlz.i32(i32 %a, i1 false)
@@ -132,12 +134,10 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    srli a1, a1, 32
-; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    li s0, 32
+; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    slli a1, a0, 32
 ; RV64I-NEXT:    srli a2, a1, 32
-; RV64I-NEXT:    li s0, 32
 ; RV64I-NEXT:    li a1, 32
 ; RV64I-NEXT:    beqz a2, .LBB2_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
@@ -182,11 +182,9 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
 ;
 ; RV64ZBB-LABEL: log2_ceil_i32:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    li a1, -1
-; RV64ZBB-NEXT:    srli a1, a1, 32
-; RV64ZBB-NEXT:    add a0, a0, a1
-; RV64ZBB-NEXT:    clzw a0, a0
 ; RV64ZBB-NEXT:    li a1, 32
+; RV64ZBB-NEXT:    addi a0, a0, -1
+; RV64ZBB-NEXT:    clzw a0, a0
 ; RV64ZBB-NEXT:    subw a0, a1, a0
 ; RV64ZBB-NEXT:    ret
   %1 = sub i32 %a, 1
@@ -277,10 +275,9 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
 ; RV64I-NEXT:    .cfi_def_cfa_offset 16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    .cfi_offset ra, -8
-; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    srliw a0, a0, 2
 ; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srl a1, a0, a2
+; RV64I-NEXT:    srli a1, a0, 2
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    srli a1, a0, 4
 ; RV64I-NEXT:    or a0, a0, a1
@@ -289,13 +286,11 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
 ; RV64I-NEXT:    srliw a1, a0, 16
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    srliw a1, a0, 1
-; RV64I-NEXT:    lui a3, 349525
-; RV64I-NEXT:    addiw a3, a3, 1365
-; RV64I-NEXT:    and a1, a1, a3
+; RV64I-NEXT:    lui a2, 349525
+; RV64I-NEXT:    addiw a2, a2, 1365
+; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    sub a0, a0, a1
-; RV64I-NEXT:    slli a1, a0, 32
-; RV64I-NEXT:    srli a1, a1, 32
-; RV64I-NEXT:    srl a1, a1, a2
+; RV64I-NEXT:    srliw a1, a0, 2
 ; RV64I-NEXT:    lui a2, 209715
 ; RV64I-NEXT:    addiw a2, a2, 819
 ; RV64I-NEXT:    and a1, a1, a2
@@ -417,11 +412,9 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:  # %bb.1: # %cond.false
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    srli a1, a1, 32
-; RV64I-NEXT:    xor a2, a0, a1
-; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    and a0, a2, a0
+; RV64I-NEXT:    not a1, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    srliw a1, a0, 1
 ; RV64I-NEXT:    lui a2, 349525
 ; RV64I-NEXT:    addiw a2, a2, 1365
@@ -462,11 +455,9 @@ define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    srli a1, a1, 32
-; RV64I-NEXT:    xor a2, a0, a1
-; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    and a0, a2, a0
+; RV64I-NEXT:    not a1, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    srliw a1, a0, 1
 ; RV64I-NEXT:    lui a2, 349525
 ; RV64I-NEXT:    addiw a2, a2, 1365
@@ -508,11 +499,9 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    li s0, -1
-; RV64I-NEXT:    li a0, -1
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    xor a1, s1, a0
-; RV64I-NEXT:    add a0, s1, a0
-; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    addi a1, s1, -1
+; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    srliw a1, a0, 1
 ; RV64I-NEXT:    lui a2, 349525
 ; RV64I-NEXT:    addiw a2, a2, 1365
@@ -569,11 +558,9 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    li a0, -1
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    xor a1, s0, a0
-; RV64I-NEXT:    add a0, s0, a0
-; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    addi a1, s0, -1
+; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    srliw a1, a0, 1
 ; RV64I-NEXT:    lui a2, 349525
 ; RV64I-NEXT:    addiw a2, a2, 1365
@@ -599,9 +586,8 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    beqz s0, .LBB9_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    li a0, 1
-; RV64I-NEXT:    srliw a1, a1, 24
-; RV64I-NEXT:    addw a0, a1, a0
+; RV64I-NEXT:    srliw a0, a1, 24
+; RV64I-NEXT:    addiw a0, a0, 1
 ; RV64I-NEXT:  .LBB9_2:
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload

diff  --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll
index 1c2bafdcb30576..5cf2619a476bc0 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll
@@ -146,20 +146,20 @@ define i64 @pack_i64_3(ptr %0, ptr %1) {
 define signext i32 @packh_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: packh_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    andi a0, a0, 255
-; RV64I-NEXT:    slliw a1, a1, 8
 ; RV64I-NEXT:    lui a2, 16
 ; RV64I-NEXT:    addiw a2, a2, -256
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    slli a1, a1, 8
 ; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBKB-LABEL: packh_i32:
 ; RV64ZBKB:       # %bb.0:
-; RV64ZBKB-NEXT:    andi a0, a0, 255
-; RV64ZBKB-NEXT:    slliw a1, a1, 8
 ; RV64ZBKB-NEXT:    lui a2, 16
 ; RV64ZBKB-NEXT:    addiw a2, a2, -256
+; RV64ZBKB-NEXT:    andi a0, a0, 255
+; RV64ZBKB-NEXT:    slli a1, a1, 8
 ; RV64ZBKB-NEXT:    and a1, a1, a2
 ; RV64ZBKB-NEXT:    or a0, a1, a0
 ; RV64ZBKB-NEXT:    ret
