< prev index next >

src/share/vm/opto/callGenerator.cpp

Print this page
rev 10595 : [backport] clean up obsolete c2 code
 - barriers are never added on constant oops
 - write barriers are always expanded to IR


  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/bcEscapeAnalyzer.hpp"
  27 #include "ci/ciCallSite.hpp"
  28 #include "ci/ciObjArray.hpp"
  29 #include "ci/ciMemberName.hpp"
  30 #include "ci/ciMethodHandle.hpp"
  31 #include "classfile/javaClasses.hpp"
  32 #include "compiler/compileLog.hpp"
  33 #include "opto/addnode.hpp"
  34 #include "opto/callGenerator.hpp"
  35 #include "opto/callnode.hpp"
  36 #include "opto/cfgnode.hpp"
  37 #include "opto/connode.hpp"
  38 #include "opto/parse.hpp"
  39 #include "opto/rootnode.hpp"
  40 #include "opto/runtime.hpp"
  41 #include "opto/shenandoahSupport.hpp"
  42 #include "opto/subnode.hpp"
  43 
  44 
  45 // Utility function: returns the C2 TypeFunc (call signature type) for this
  45 // generator's target method, built on demand from the ciMethod.
  46 const TypeFunc* CallGenerator::tf() const {
  47   return TypeFunc::make(method());
  48 }
  49 
  50 //-----------------------------ParseGenerator---------------------------------
  51 // Internal class which handles all direct bytecode traversal.
  52 class ParseGenerator : public InlineCallGenerator {
  53 private:
  54   bool  _is_osr;
  55   float _expected_uses;
  56 
  57 public:
  58   ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
  59     : InlineCallGenerator(method)
  60   {
  61     _is_osr        = is_osr;


 773   if (IncrementalInline && call_site_count > 0 &&
 774       (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
 775     return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
 776   } else {
 777     // Out-of-line call.
 778     return CallGenerator::for_direct_call(callee);
 779   }
 780 }
 781 
 782 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
 783   GraphKit kit(jvms);
 784   PhaseGVN& gvn = kit.gvn();
 785   Compile* C = kit.C;
 786   vmIntrinsics::ID iid = callee->intrinsic_id();
 787   input_not_const = true;
 788   switch (iid) {
 789   case vmIntrinsics::_invokeBasic:
 790     {
 791       // Get MethodHandle receiver:
 792       Node* receiver = kit.argument(0);
 793       assert(!(ShenandoahBarrierNode::skip_through_barrier(receiver)->is_Con() && !receiver->is_Con()), "barrier prevents optimization");
 794       if (receiver->Opcode() == Op_ConP) {
 795         input_not_const = false;
 796         const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
 797         ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
 798         guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
 799         const int vtable_index = Method::invalid_vtable_index;
 800         CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true);
 801         assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
 802         if (cg != NULL && cg->is_inline())
 803           return cg;
 804       }
 805     }
 806     break;
 807 
 808   case vmIntrinsics::_linkToVirtual:
 809   case vmIntrinsics::_linkToStatic:
 810   case vmIntrinsics::_linkToSpecial:
 811   case vmIntrinsics::_linkToInterface:
 812     {
 813       // Get MemberName argument:
 814       Node* member_name = kit.argument(callee->arg_size() - 1);
 815       assert(!(ShenandoahBarrierNode::skip_through_barrier(member_name)->is_Con() && !member_name->is_Con()), "barrier prevents optimization");
 816       if (member_name->Opcode() == Op_ConP) {
 817         input_not_const = false;
 818         const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
 819         ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
 820 
 821         // In lambda forms we erase signature types to avoid resolving issues
 822         // involving class loaders.  When we optimize a method handle invoke
 823         // to a direct call we must cast the receiver and arguments to its
 824         // actual types.
 825         ciSignature* signature = target->signature();
 826         const int receiver_skip = target->is_static() ? 0 : 1;
 827         // Cast receiver to its type.
 828         if (!target->is_static()) {
 829           Node* arg = kit.argument(0);
 830           const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
 831           const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
 832           if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
 833             Node* cast_obj = gvn.transform(new (C) CheckCastPPNode(kit.control(), arg, sig_type));
 834             kit.set_argument(0, cast_obj);
 835           }




  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/bcEscapeAnalyzer.hpp"
  27 #include "ci/ciCallSite.hpp"
  28 #include "ci/ciObjArray.hpp"
  29 #include "ci/ciMemberName.hpp"
  30 #include "ci/ciMethodHandle.hpp"
  31 #include "classfile/javaClasses.hpp"
  32 #include "compiler/compileLog.hpp"
  33 #include "opto/addnode.hpp"
  34 #include "opto/callGenerator.hpp"
  35 #include "opto/callnode.hpp"
  36 #include "opto/cfgnode.hpp"
  37 #include "opto/connode.hpp"
  38 #include "opto/parse.hpp"
  39 #include "opto/rootnode.hpp"
  40 #include "opto/runtime.hpp"

  41 #include "opto/subnode.hpp"
  42 
  43 
  44 // Utility function: returns the C2 TypeFunc (call signature type) for this
  44 // generator's target method, built on demand from the ciMethod.
  45 const TypeFunc* CallGenerator::tf() const {
  46   return TypeFunc::make(method());
  47 }
  48 
  49 //-----------------------------ParseGenerator---------------------------------
  50 // Internal class which handles all direct bytecode traversal.
  51 class ParseGenerator : public InlineCallGenerator {
  52 private:
  53   bool  _is_osr;
  54   float _expected_uses;
  55 
  56 public:
  57   ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
  58     : InlineCallGenerator(method)
  59   {
  60     _is_osr        = is_osr;


 772   if (IncrementalInline && call_site_count > 0 &&
 773       (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
 774     return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
 775   } else {
 776     // Out-of-line call.
 777     return CallGenerator::for_direct_call(callee);
 778   }
 779 }
 780 
 781 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
 782   GraphKit kit(jvms);
 783   PhaseGVN& gvn = kit.gvn();
 784   Compile* C = kit.C;
 785   vmIntrinsics::ID iid = callee->intrinsic_id();
 786   input_not_const = true;
 787   switch (iid) {
 788   case vmIntrinsics::_invokeBasic:
 789     {
 790       // Get MethodHandle receiver:
 791       Node* receiver = kit.argument(0);

 792       if (receiver->Opcode() == Op_ConP) {
 793         input_not_const = false;
 794         const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
 795         ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
 796         guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
 797         const int vtable_index = Method::invalid_vtable_index;
 798         CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true);
 799         assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
 800         if (cg != NULL && cg->is_inline())
 801           return cg;
 802       }
 803     }
 804     break;
 805 
 806   case vmIntrinsics::_linkToVirtual:
 807   case vmIntrinsics::_linkToStatic:
 808   case vmIntrinsics::_linkToSpecial:
 809   case vmIntrinsics::_linkToInterface:
 810     {
 811       // Get MemberName argument:
 812       Node* member_name = kit.argument(callee->arg_size() - 1);

 813       if (member_name->Opcode() == Op_ConP) {
 814         input_not_const = false;
 815         const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
 816         ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
 817 
 818         // In lambda forms we erase signature types to avoid resolving issues
 819         // involving class loaders.  When we optimize a method handle invoke
 820         // to a direct call we must cast the receiver and arguments to its
 821         // actual types.
 822         ciSignature* signature = target->signature();
 823         const int receiver_skip = target->is_static() ? 0 : 1;
 824         // Cast receiver to its type.
 825         if (!target->is_static()) {
 826           Node* arg = kit.argument(0);
 827           const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
 828           const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
 829           if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
 830             Node* cast_obj = gvn.transform(new (C) CheckCastPPNode(kit.control(), arg, sig_type));
 831             kit.set_argument(0, cast_obj);
 832           }


< prev index next >