[cfe-commits] r106970 - in /cfe/trunk: lib/CodeGen/CGCall.cpp lib/CodeGen/CodeGenFunction.cpp lib/CodeGen/CodeGenFunction.h test/CodeGen/2008-07-29-override-alias-decl.c test/CodeGen/2008-12-02-logical-or-fold.c test/CodeGen/builtins-ppc-altivec.c test/CodeGen/const-unordered-compare.c test/CodeGenCXX/references.cpp

Devang Patel devang.patel at gmail.com
Wed Jun 30 13:12:27 PDT 2010


Chris,

This causes debug info regressions. See PR 7534.
-
Devang

On Sat, Jun 26, 2010 at 6:06 PM, Chris Lattner <sabre at nondot.org> wrote:
> Author: lattner
> Date: Sat Jun 26 20:06:27 2010
> New Revision: 106970
>
> URL: http://llvm.org/viewvc/llvm-project?rev=106970&view=rev
> Log:
> Change IR generation for return (in the simple case) to avoid doing silly
> load/store nonsense in the epilog.  For example, for:
>
> int foo(int X) {
>  int A[100];
>  return A[X];
> }
>
> we used to generate:
>
>  %arrayidx = getelementptr inbounds [100 x i32]* %A, i32 0, i64 %idxprom ; <i32*> [#uses=1]
>  %tmp1 = load i32* %arrayidx                     ; <i32> [#uses=1]
>  store i32 %tmp1, i32* %retval
>  %0 = load i32* %retval                          ; <i32> [#uses=1]
>  ret i32 %0
> }
>
> which codegen'd to this code:
>
> _foo:                                   ## @foo
> ## BB#0:                                ## %entry
>        subq    $408, %rsp              ## imm = 0x198
>        movl    %edi, 400(%rsp)
>        movl    400(%rsp), %edi
>        movslq  %edi, %rax
>        movl    (%rsp,%rax,4), %edi
>        movl    %edi, 404(%rsp)
>        movl    404(%rsp), %eax
>        addq    $408, %rsp              ## imm = 0x198
>        ret
>
> Now we generate:
>
>  %arrayidx = getelementptr inbounds [100 x i32]* %A, i32 0, i64 %idxprom ; <i32*> [#uses=1]
>  %tmp1 = load i32* %arrayidx                     ; <i32> [#uses=1]
>  ret i32 %tmp1
> }
>
> and:
>
> _foo:                                   ## @foo
> ## BB#0:                                ## %entry
>        subq    $408, %rsp              ## imm = 0x198
>        movl    %edi, 404(%rsp)
>        movl    404(%rsp), %edi
>        movslq  %edi, %rax
>        movl    (%rsp,%rax,4), %eax
>        addq    $408, %rsp              ## imm = 0x198
>        ret
>
> This actually does matter, cutting out 2000 lines of IR from CGStmt.ll
> for example.
>
> Another interesting effect is that altivec.h functions which are dead
> now get dce'd by the inliner.  Hence all the changes to
> builtins-ppc-altivec.c to ensure the calls aren't dead.
>
>
>
>
> Modified:
>    cfe/trunk/lib/CodeGen/CGCall.cpp
>    cfe/trunk/lib/CodeGen/CodeGenFunction.cpp
>    cfe/trunk/lib/CodeGen/CodeGenFunction.h
>    cfe/trunk/test/CodeGen/2008-07-29-override-alias-decl.c
>    cfe/trunk/test/CodeGen/2008-12-02-logical-or-fold.c
>    cfe/trunk/test/CodeGen/builtins-ppc-altivec.c
>    cfe/trunk/test/CodeGen/const-unordered-compare.c
>    cfe/trunk/test/CodeGenCXX/references.cpp
>
> Modified: cfe/trunk/lib/CodeGen/CGCall.cpp
> URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGCall.cpp?rev=106970&r1=106969&r2=106970&view=diff
> ==============================================================================
> --- cfe/trunk/lib/CodeGen/CGCall.cpp (original)
> +++ cfe/trunk/lib/CodeGen/CGCall.cpp Sat Jun 26 20:06:27 2010
> @@ -373,18 +373,18 @@
>     // FIXME: Use better alignment / avoid requiring aligned load.
>     Load->setAlignment(1);
>     return Load;
> -  } else {
> -    // Otherwise do coercion through memory. This is stupid, but
> -    // simple.
> -    llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
> -    llvm::Value *Casted =
> -      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
> -    llvm::StoreInst *Store =
> -      CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
> -    // FIXME: Use better alignment / avoid requiring aligned store.
> -    Store->setAlignment(1);
> -    return CGF.Builder.CreateLoad(Tmp);
>   }
> +
> +  // Otherwise do coercion through memory. This is stupid, but
> +  // simple.
> +  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
> +  llvm::Value *Casted =
> +    CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
> +  llvm::StoreInst *Store =
> +    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
> +  // FIXME: Use better alignment / avoid requiring aligned store.
> +  Store->setAlignment(1);
> +  return CGF.Builder.CreateLoad(Tmp);
>  }
>
>  /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
> @@ -798,8 +798,7 @@
>   assert(AI == Fn->arg_end() && "Argument mismatch!");
>  }
>
> -void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
> -                                         llvm::Value *ReturnValue) {
> +void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
>   // Functions with no result always return void.
>   if (ReturnValue == 0) {
>     Builder.CreateRetVoid();
> @@ -824,12 +823,32 @@
>     break;
>
>   case ABIArgInfo::Extend:
> -  case ABIArgInfo::Direct:
> -    // The internal return value temp always will have
> -    // pointer-to-return-type type.
> -    RV = Builder.CreateLoad(ReturnValue);
> +  case ABIArgInfo::Direct: {
> +    // The internal return value temp always will have pointer-to-return-type
> +    // type, just do a load.
> +
> +    // If the instruction right before the insertion point is a store to the
> +    // return value, we can elide the load, zap the store, and usually zap the
> +    // alloca.
> +    llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
> +    llvm::StoreInst *SI = 0;
> +    if (InsertBB->empty() ||
> +        !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
> +        SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
> +      RV = Builder.CreateLoad(ReturnValue);
> +    } else {
> +      // Get the stored value and nuke the now-dead store.
> +      RV = SI->getValueOperand();
> +      SI->eraseFromParent();
> +
> +      // If that was the only use of the return value, nuke it as well now.
> +      if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
> +        cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
> +        ReturnValue = 0;
> +      }
> +    }
>     break;
> -
> +  }
>   case ABIArgInfo::Ignore:
>     break;
>
>
> Modified: cfe/trunk/lib/CodeGen/CodeGenFunction.cpp
> URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CodeGenFunction.cpp?rev=106970&r1=106969&r2=106970&view=diff
> ==============================================================================
> --- cfe/trunk/lib/CodeGen/CodeGenFunction.cpp (original)
> +++ cfe/trunk/lib/CodeGen/CodeGenFunction.cpp Sat Jun 26 20:06:27 2010
> @@ -137,7 +137,7 @@
>     DI->EmitRegionEnd(CurFn, Builder);
>   }
>
> -  EmitFunctionEpilog(*CurFnInfo, ReturnValue);
> +  EmitFunctionEpilog(*CurFnInfo);
>   EmitEndEHSpec(CurCodeDecl);
>
>   // If someone did an indirect goto, emit the indirect goto block at the end of
>
> Modified: cfe/trunk/lib/CodeGen/CodeGenFunction.h
> URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CodeGenFunction.h?rev=106970&r1=106969&r2=106970&view=diff
> ==============================================================================
> --- cfe/trunk/lib/CodeGen/CodeGenFunction.h (original)
> +++ cfe/trunk/lib/CodeGen/CodeGenFunction.h Sat Jun 26 20:06:27 2010
> @@ -585,7 +585,7 @@
>
>   /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
>   /// given temporary.
> -  void EmitFunctionEpilog(const CGFunctionInfo &FI, llvm::Value *ReturnValue);
> +  void EmitFunctionEpilog(const CGFunctionInfo &FI);
>
>   /// EmitStartEHSpec - Emit the start of the exception spec.
>   void EmitStartEHSpec(const Decl *D);
>
> Modified: cfe/trunk/test/CodeGen/2008-07-29-override-alias-decl.c
> URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/2008-07-29-override-alias-decl.c?rev=106970&r1=106969&r2=106970&view=diff
> ==============================================================================
> --- cfe/trunk/test/CodeGen/2008-07-29-override-alias-decl.c (original)
> +++ cfe/trunk/test/CodeGen/2008-07-29-override-alias-decl.c Sat Jun 26 20:06:27 2010
> @@ -2,10 +2,7 @@
>
>  int x() { return 1; }
>
> -// CHECK:  [[retval:%.*]] = alloca i32
> -// CHECK:  store i32 1, i32* [[retval]]
> -// CHECK:  [[load:%.*]] = load i32* [[retval]]
> -// CHECK:  ret i32 [[load]]
> +// CHECK:  ret i32 1
>
>
>  int f() __attribute__((weak, alias("x")));
> @@ -17,9 +14,6 @@
>   return f();
>  }
>
> -// CHECK:  [[retval:%.*]] = alloca i32
>  // CHECK:  [[call:%.*]] = call i32 (...)* @f()
> -// CHECK:  store i32 [[call]], i32* [[retval]]
> -// CHECK:  [[load:%.*]] = load i32* [[retval]]
> -// CHECK:  ret i32 [[load]]
> +// CHECK:  ret i32 [[call]]
>
>
> Modified: cfe/trunk/test/CodeGen/2008-12-02-logical-or-fold.c
> URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/2008-12-02-logical-or-fold.c?rev=106970&r1=106969&r2=106970&view=diff
> ==============================================================================
> --- cfe/trunk/test/CodeGen/2008-12-02-logical-or-fold.c (original)
> +++ cfe/trunk/test/CodeGen/2008-12-02-logical-or-fold.c Sat Jun 26 20:06:27 2010
> @@ -1,4 +1,4 @@
> -// RUN: %clang_cc1 -emit-llvm -o - %s | grep "store i32 1"
> +// RUN: %clang_cc1 -emit-llvm -o - %s | grep "ret i32 1"
>  // PR3150
>
>  int a() {return 1||1;}
>
> Modified: cfe/trunk/test/CodeGen/builtins-ppc-altivec.c
> URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/builtins-ppc-altivec.c?rev=106970&r1=106969&r2=106970&view=diff
> ==============================================================================
> --- cfe/trunk/test/CodeGen/builtins-ppc-altivec.c (original)
> +++ cfe/trunk/test/CodeGen/builtins-ppc-altivec.c Sat Jun 26 20:06:27 2010
> @@ -1,46 +1,47 @@
>  // RUN: %clang_cc1 -faltivec -triple powerpc-unknown-unknown -emit-llvm %s -o - | FileCheck %s
>
> -int main ()
> -{
> -  // TODO: uncomment
> +// TODO: uncomment
>  /*  vector bool char vbc = { 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 }; */
> -  vector signed char vsc = { 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16 };
> -  vector unsigned char vuc = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 };
> -  // TODO: uncomment
> +vector signed char vsc = { 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16 };
> +vector unsigned char vuc = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 };
> +// TODO: uncomment
>  /*  vector bool short vbs = { 1, 0, 1, 0, 1, 0, 1, 0 }; */
> -  vector short vs = { -1, 2, -3, 4, -5, 6, -7, 8 };
> -  vector unsigned short vus = { 1, 2, 3, 4, 5, 6, 7, 8 };
> -  // TODO: uncomment
> +vector short vs = { -1, 2, -3, 4, -5, 6, -7, 8 };
> +vector unsigned short vus = { 1, 2, 3, 4, 5, 6, 7, 8 };
> +// TODO: uncomment
>  /*  vector bool int vbi = { 1, 0, 1, 0 }; */
> -  vector int vi = { -1, 2, -3, 4 };
> -  vector unsigned int vui = { 1, 2, 3, 4 };
> -  vector float vf = { -1.5, 2.5, -3.5, 4.5 };
> +vector int vi = { -1, 2, -3, 4 };
> +vector unsigned int vui = { 1, 2, 3, 4 };
> +vector float vf = { -1.5, 2.5, -3.5, 4.5 };
>
> -  // TODO: uncomment
> +// TODO: uncomment
>  /*  vector bool char res_vbc; */
> -  vector signed char res_vsc;
> -  vector unsigned char res_vuc;
> -  // TODO: uncomment
> +vector signed char res_vsc;
> +vector unsigned char res_vuc;
> +// TODO: uncomment
>  /*  vector bool short res_vbs; */
> -  vector short res_vs;
> -  vector unsigned short res_vus;
> -  // TODO: uncomment
> -  vector pixel res_vp;
> -  // TODO: uncomment
> +vector short res_vs;
> +vector unsigned short res_vus;
> +// TODO: uncomment
> +vector pixel res_vp;
> +// TODO: uncomment
>  /*  vector bool int res_vbi; */
> -  vector int res_vi;
> -  vector unsigned int res_vui;
> -  vector float res_vf;
> -
> -  signed char param_sc;
> -  unsigned char param_uc;
> -  short param_s;
> -  unsigned short param_us;
> -  int param_i;
> -  unsigned int param_ui;
> -  float param_f;
> +vector int res_vi;
> +vector unsigned int res_vui;
> +vector float res_vf;
> +
> +signed char param_sc;
> +unsigned char param_uc;
> +short param_s;
> +unsigned short param_us;
> +int param_i;
> +unsigned int param_ui;
> +float param_f;
>
> -  int res_i;
> +int res_i;
> +
> +int test1() {
> +// CHECK: define i32 @test1
>
>   /* vec_abs */
>   vsc = vec_abs(vsc);                           // CHECK: sub nsw <16 x i8> zeroinitializer
> @@ -154,9 +155,12 @@
>
>   res_vf = vec_vandc(vf, vf);                   // CHECK: xor <4 x i32>
>                                                 // CHECK: and <4 x i32>
> +}
>
> +// CHECK: define i32 @test2
> +int test2() {
>   /* vec_avg */
> -  res_vsc = vec_avg(vsc, vsc);                  // CHECK: @llvm.ppc.altivec.vavgsb
> +  res_vsc = vec_avg(vsc, vsc);                  // CHECK: call {{.*}}@llvm.ppc.altivec.vavgsb
>   res_vuc = vec_avg(vuc, vuc);                  // CHECK: @llvm.ppc.altivec.vavgub
>   res_vs  = vec_avg(vs, vs);                    // CHECK: @llvm.ppc.altivec.vavgsh
>   res_vus = vec_avg(vus, vus);                  // CHECK: @llvm.ppc.altivec.vavguh
> @@ -178,50 +182,53 @@
>   res_vi = vec_vcmpbfp(vf, vf);                 // CHECK: @llvm.ppc.altivec.vcmpbfp
>
>   /* vec_cmpeq */
> -  // TODO: uncomment
> -  /*res_vbc = */vec_cmpeq(vsc, vsc);            // CHECK: @llvm.ppc.altivec.vcmpequb
> -  /*res_vbc = */vec_cmpeq(vuc, vuc);            // CHECK: @llvm.ppc.altivec.vcmpequb
> -  /*res_vbs = */vec_cmpeq(vs, vs);              // CHECK: @llvm.ppc.altivec.vcmpequh
> -  /*res_vbs = */vec_cmpeq(vus, vus);            // CHECK: @llvm.ppc.altivec.vcmpequh
> -  /*res_vbi = */vec_cmpeq(vi, vi);              // CHECK: @llvm.ppc.altivec.vcmpequw
> -  /*res_vbi = */vec_cmpeq(vui, vui);            // CHECK: @llvm.ppc.altivec.vcmpequw
> -  /*res_vbi = */vec_cmpeq(vf, vf);              // CHECK: @llvm.ppc.altivec.vcmpeqfp
> +  vsc = vec_cmpeq(vsc, vsc);            // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpequb
> +  vuc = vec_cmpeq(vuc, vuc);            // CHECK: @llvm.ppc.altivec.vcmpequb
> +  vs = vec_cmpeq(vs, vs);              // CHECK: @llvm.ppc.altivec.vcmpequh
> +  vs = vec_cmpeq(vus, vus);            // CHECK: @llvm.ppc.altivec.vcmpequh
> +  vi = vec_cmpeq(vi, vi);              // CHECK: @llvm.ppc.altivec.vcmpequw
> +  vui = vec_cmpeq(vui, vui);            // CHECK: @llvm.ppc.altivec.vcmpequw
> +  vf = vec_cmpeq(vf, vf);              // CHECK: @llvm.ppc.altivec.vcmpeqfp
>
>   /* vec_cmpge */
> -  // TODO: uncomment
> -  /*res_vbi = */vec_cmpge(vf, vf);              // CHECK: @llvm.ppc.altivec.vcmpgefp
> -  /*res_vbi = */vec_vcmpgefp(vf, vf);           // CHECK: @llvm.ppc.altivec.vcmpgefp
> +  vf = vec_cmpge(vf, vf);              // CHECK: @llvm.ppc.altivec.vcmpgefp
> +  vf = vec_vcmpgefp(vf, vf);           // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgefp
>
> +}
> +
> +// CHECK: define i32 @test5
> +int test5() {
> +
>   /* vec_cmpgt */
> -  // TODO: uncomment
> -  /*res_vbc = */vec_cmpgt(vsc, vsc);            // CHECK: @llvm.ppc.altivec.vcmpgtsb
> -  /*res_vbc = */vec_cmpgt(vuc, vuc);            // CHECK: @llvm.ppc.altivec.vcmpgtub
> -  /*res_vbs = */vec_cmpgt(vs, vs);              // CHECK: @llvm.ppc.altivec.vcmpgtsh
> -  /*res_vbs = */vec_cmpgt(vus, vus);            // CHECK: @llvm.ppc.altivec.vcmpgtuh
> -  /*res_vbi = */vec_cmpgt(vi, vi);              // CHECK: @llvm.ppc.altivec.vcmpgtsw
> -  /*res_vbi = */vec_cmpgt(vui, vui);            // CHECK: @llvm.ppc.altivec.vcmpgtuw
> -  /*res_vbi = */vec_cmpgt(vf, vf);              // CHECK: @llvm.ppc.altivec.vcmpgtfp
> -  /*res_vbc = */vec_vcmpgtsb(vsc, vsc);         // CHECK: @llvm.ppc.altivec.vcmpgtsb
> -  /*res_vbc = */vec_vcmpgtub(vuc, vuc);         // CHECK: @llvm.ppc.altivec.vcmpgtub
> -  /*res_vbs = */vec_vcmpgtsh(vs, vs);           // CHECK: @llvm.ppc.altivec.vcmpgtsh
> -  /*res_vbs = */vec_vcmpgtuh(vus, vus);         // CHECK: @llvm.ppc.altivec.vcmpgtuh
> -  /*res_vbi = */vec_vcmpgtsw(vi, vi);           // CHECK: @llvm.ppc.altivec.vcmpgtsw
> -  /*res_vbi = */vec_vcmpgtuw(vui, vui);         // CHECK: @llvm.ppc.altivec.vcmpgtuw
> -  /*res_vbi = */vec_vcmpgtfp(vf, vf);           // CHECK: @llvm.ppc.altivec.vcmpgtfp
> +  vsc = vec_cmpgt(vsc, vsc);            // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtsb
> +  vuc = vec_cmpgt(vuc, vuc);            // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtub
> +  vs = vec_cmpgt(vs, vs);              // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtsh
> +  vus = vec_cmpgt(vus, vus);            // CHECK: @llvm.ppc.altivec.vcmpgtuh
> +  vi = vec_cmpgt(vi, vi);              // CHECK: @llvm.ppc.altivec.vcmpgtsw
> +  vui = vec_cmpgt(vui, vui);            // CHECK: @llvm.ppc.altivec.vcmpgtuw
> +  vf = vec_cmpgt(vf, vf);              // CHECK: @llvm.ppc.altivec.vcmpgtfp
> +  vsc = vec_vcmpgtsb(vsc, vsc);         // CHECK: @llvm.ppc.altivec.vcmpgtsb
> +  vuc = vec_vcmpgtub(vuc, vuc);         // CHECK: @llvm.ppc.altivec.vcmpgtub
> +  vs = vec_vcmpgtsh(vs, vs);           // CHECK: @llvm.ppc.altivec.vcmpgtsh
> +  vus = vec_vcmpgtuh(vus, vus);         // CHECK: @llvm.ppc.altivec.vcmpgtuh
> +  vi = vec_vcmpgtsw(vi, vi);           // CHECK: @llvm.ppc.altivec.vcmpgtsw
> +  vui = vec_vcmpgtuw(vui, vui);         // CHECK: @llvm.ppc.altivec.vcmpgtuw
> +  vf = vec_vcmpgtfp(vf, vf);           // CHECK: @llvm.ppc.altivec.vcmpgtfp
>
>   /* vec_cmple */
> -  // TODO: uncomment
> -  /*res_vbi = */vec_cmple(vf, vf);              // CHECK: @llvm.ppc.altivec.vcmpgefp
> +  vf = vec_cmple(vf, vf);              // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgefp
> +}
>
> +// CHECK: define i32 @test6
> +int test6() {
>   /* vec_cmplt */
> -  // TODO: uncomment
> -  /*res_vbc = */vec_cmplt(vsc, vsc);            // CHECK: @llvm.ppc.altivec.vcmpgtsb
> -  /*res_vbc = */vec_cmplt(vuc, vuc);            // CHECK: @llvm.ppc.altivec.vcmpgtub
> -  /*res_vbs = */vec_cmplt(vs, vs);              // CHECK: @llvm.ppc.altivec.vcmpgtsh
> -  /*res_vbs = */vec_cmplt(vus, vus);            // CHECK: @llvm.ppc.altivec.vcmpgtuh
> -  /*res_vbi = */vec_cmplt(vi, vi);              // CHECK: @llvm.ppc.altivec.vcmpgtsw
> -  /*res_vbi = */vec_cmplt(vui, vui);            // CHECK: @llvm.ppc.altivec.vcmpgtuw
> -  /*res_vbi = */vec_cmplt(vf, vf);              // CHECK: @llvm.ppc.altivec.vcmpgtfp
> +  vsc =vec_cmplt(vsc, vsc);            // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtsb
> +  vsc =vec_cmplt(vuc, vuc);            // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtub
> +  vs = vec_cmplt(vs, vs);              // CHECK: call {{.*}}@llvm.ppc.altivec.vcmpgtsh
> +  vs = vec_cmplt(vus, vus);            // CHECK: @llvm.ppc.altivec.vcmpgtuh
> +  vi = vec_cmplt(vi, vi);              // CHECK: @llvm.ppc.altivec.vcmpgtsw
> +  vui = vec_cmplt(vui, vui);            // CHECK: @llvm.ppc.altivec.vcmpgtuw
> +  vf = vec_cmplt(vf, vf);              // CHECK: @llvm.ppc.altivec.vcmpgtfp
>
>   /* vec_ctf */
>   res_vf  = vec_ctf(vi, param_i);               // CHECK: @llvm.ppc.altivec.vcfsx
>
> Modified: cfe/trunk/test/CodeGen/const-unordered-compare.c
> URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/const-unordered-compare.c?rev=106970&r1=106969&r2=106970&view=diff
> ==============================================================================
> --- cfe/trunk/test/CodeGen/const-unordered-compare.c (original)
> +++ cfe/trunk/test/CodeGen/const-unordered-compare.c Sat Jun 26 20:06:27 2010
> @@ -2,6 +2,6 @@
>
>  // Checks folding of an unordered comparison
>  int nan_ne_check() {
> -  // CHECK: store i32 1
> +  // CHECK: ret i32 1
>   return (__builtin_nanf("") != __builtin_nanf("")) ? 1 : 0;
>  }
>
> Modified: cfe/trunk/test/CodeGenCXX/references.cpp
> URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGenCXX/references.cpp?rev=106970&r1=106969&r2=106970&view=diff
> ==============================================================================
> --- cfe/trunk/test/CodeGenCXX/references.cpp (original)
> +++ cfe/trunk/test/CodeGenCXX/references.cpp Sat Jun 26 20:06:27 2010
> @@ -150,10 +150,9 @@
>
>  // PR6024
>  // CHECK: @_Z2f2v()
> -// CHECK: alloca
> -// CHECK: store
> -// CHECK: load
> -// CHECK: ret
> +// CHECK: alloca i32,
> +// CHECK-NEXT: store
> +// CHECK-NEXT: ret
>  const int &f2() { return 0; }
>
>  // Don't constant fold const reference parameters with default arguments to
>
>
> _______________________________________________
> cfe-commits mailing list
> cfe-commits at cs.uiuc.edu
> http://lists.cs.uiuc.edu/mailman/listinfo/cfe-commits
>



-- 
-
Devang




More information about the cfe-commits mailing list