src/hotspot/share/opto/callnode.cpp

  25 #include "precompiled.hpp"
  26 #include "compiler/compileLog.hpp"
  27 #include "ci/bcEscapeAnalyzer.hpp"
  28 #include "compiler/oopMap.hpp"
  29 #include "gc/shared/barrierSet.hpp"
  30 #include "gc/shared/c2/barrierSetC2.hpp"
  31 #include "interpreter/interpreter.hpp"
  32 #include "opto/callGenerator.hpp"
  33 #include "opto/callnode.hpp"
  34 #include "opto/castnode.hpp"
  35 #include "opto/convertnode.hpp"
  36 #include "opto/escape.hpp"
  37 #include "opto/locknode.hpp"
  38 #include "opto/machnode.hpp"
  39 #include "opto/matcher.hpp"
  40 #include "opto/parse.hpp"
  41 #include "opto/regalloc.hpp"
  42 #include "opto/regmask.hpp"
  43 #include "opto/rootnode.hpp"
  44 #include "opto/runtime.hpp"
  45 
  46 // Portions of code courtesy of Clifford Click
  47 
  48 // Optimization - Graph Style
  49 
  50 //=============================================================================
  51 uint StartNode::size_of() const { return sizeof(*this); }
  52 uint StartNode::cmp( const Node &n ) const
  53 { return _domain == ((StartNode&)n)._domain; }
  54 const Type *StartNode::bottom_type() const { return _domain; }
  55 const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; }
  56 #ifndef PRODUCT
  57 void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
  58 void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
  59 #endif
  60 
  61 //------------------------------Ideal------------------------------------------
  62 Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  63   return remove_dead_region(phase, can_reshape) ? this : NULL;
  64 }
  65 
  66 //------------------------------calling_convention-----------------------------
  67 void StartNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  68   Matcher::calling_convention( sig_bt, parm_regs, argcnt, false );
  69 }
  70 
  71 //------------------------------Registers--------------------------------------
  72 const RegMask &StartNode::in_RegMask(uint) const {
  73   return RegMask::Empty;
  74 }
  75 
  76 //------------------------------match------------------------------------------
  77 // Construct projections for incoming parameters, and their RegMask info
  78 Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
  79   switch (proj->_con) {
  80   case TypeFunc::Control:
  81   case TypeFunc::I_O:
  82   case TypeFunc::Memory:
  83     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  84   case TypeFunc::FramePtr:
  85     return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  86   case TypeFunc::ReturnAdr:
  87     return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  88   case TypeFunc::Parms:
  89   default: {
  90       uint parm_num = proj->_con - TypeFunc::Parms;
  91       const Type *t = _domain->field_at(proj->_con);
  92       if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
  93         return new ConNode(Type::TOP);
  94       uint ideal_reg = t->ideal_reg();
  95       RegMask &rm = match->_calling_convention_mask[parm_num];
  96       return new MachProjNode(this,proj->_con,rm,ideal_reg);
  97     }
  98   }


 462       if (cik->is_instance_klass()) {
 463         cik->print_name_on(st);
 464         iklass = cik->as_instance_klass();
 465       } else if (cik->is_type_array_klass()) {
 466         cik->as_array_klass()->base_element_type()->print_name_on(st);
 467         st->print("[%d]", spobj->n_fields());
 468       } else if (cik->is_obj_array_klass()) {
 469         ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
 470         if (cie->is_instance_klass()) {
 471           cie->print_name_on(st);
 472         } else if (cie->is_type_array_klass()) {
 473           cie->as_array_klass()->base_element_type()->print_name_on(st);
 474         } else {
 475           ShouldNotReachHere();
 476         }
 477         st->print("[%d]", spobj->n_fields());
 478         int ndim = cik->as_array_klass()->dimension() - 1;
 479         while (ndim-- > 0) {
 480           st->print("[]");
 481         }
 482       }
 483       st->print("={");
 484       uint nf = spobj->n_fields();
 485       if (nf > 0) {
 486         uint first_ind = spobj->first_index(mcall->jvms());
 487         Node* fld_node = mcall->in(first_ind);
 488         ciField* cifield;
 489         if (iklass != NULL) {
 490           st->print(" [");
 491           cifield = iklass->nonstatic_field_at(0);
 492           cifield->print_name_on(st);
 493           format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
 494         } else {
 495           format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
 496         }
 497         for (uint j = 1; j < nf; j++) {
 498           fld_node = mcall->in(first_ind+j);
 499           if (iklass != NULL) {
 500             st->print(", [");
 501             cifield = iklass->nonstatic_field_at(j);


 671 #ifndef PRODUCT
 672 void CallNode::dump_req(outputStream *st) const {
 673   // Dump the required inputs, enclosed in '(' and ')'
 674   uint i;                       // Exit value of loop
 675   for (i = 0; i < req(); i++) {    // For all required inputs
 676     if (i == TypeFunc::Parms) st->print("(");
 677     if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
 678     else st->print("_ ");
 679   }
 680   st->print(")");
 681 }
 682 
 683 void CallNode::dump_spec(outputStream *st) const {
 684   st->print(" ");
 685   if (tf() != NULL)  tf()->dump_on(st);
 686   if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
 687   if (jvms() != NULL)  jvms()->dump_spec(st);
 688 }
 689 #endif
 690 
 691 const Type *CallNode::bottom_type() const { return tf()->range(); }
 692 const Type* CallNode::Value(PhaseGVN* phase) const {
 693   if (phase->type(in(0)) == Type::TOP)  return Type::TOP;
 694   return tf()->range();
 695 }
 696 
 697 //------------------------------calling_convention-----------------------------
 698 void CallNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
 699   // Use the standard compiler calling convention
 700   Matcher::calling_convention( sig_bt, parm_regs, argcnt, true );
 701 }
 702 
 703 
 704 //------------------------------match------------------------------------------
 705 // Construct projections for control, I/O, memory-fields, ..., and
 706 // return result(s) along with their RegMask info
 707 Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
 708   switch (proj->_con) {
 709   case TypeFunc::Control:
 710   case TypeFunc::I_O:
 711   case TypeFunc::Memory:
 712     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
 713 
 714   case TypeFunc::Parms+1:       // For LONG & DOUBLE returns
 715     assert(tf()->range()->field_at(TypeFunc::Parms+1) == Type::HALF, "");
 716     // 2nd half of doubles and longs
 717     return new MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);
 718 
 719   case TypeFunc::Parms: {       // Normal returns
 720     uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg();
 721     OptoRegPair regs = is_CallRuntime()
 722       ? match->c_return_value(ideal_reg,true)  // Calls into C runtime
 723       : match->  return_value(ideal_reg,true); // Calls into compiled Java code
 724     RegMask rm = RegMask(regs.first());
 725     if( OptoReg::is_valid(regs.second()) )
 726       rm.Insert( regs.second() );
 727     return new MachProjNode(this,proj->_con,rm,ideal_reg);
 728   }
 729 
 730   case TypeFunc::ReturnAdr:
 731   case TypeFunc::FramePtr:
 732   default:
 733     ShouldNotReachHere();
 734   }
 735   return NULL;
 736 }
 737 
 738 // Do we Match on this edge index or not?  Match no edges
 739 uint CallNode::match_edge(uint idx) const {
 740   return 0;
 741 }
 742 
 743 //
 744 // Determine whether the call could modify the field of the specified
 745 // instance at the specified offset.
 746 //
 747 bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
 748   assert((t_oop != NULL), "sanity");
 749   if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
 750     const TypeTuple* args = _tf->domain();
 751     Node* dest = NULL;
 752     // Stubs that can be called once an ArrayCopyNode is expanded have
 753     // different signatures. Look for the second pointer argument,
 754     // which is the destination of the copy.
 755     for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
 756       if (args->field_at(i)->isa_ptr()) {
 757         j++;
 758         if (j == 2) {
 759           dest = in(i);
 760           break;
 761         }
 762       }
 763     }
 764     guarantee(dest != NULL, "Call had only one ptr in, broken IR!");
 765     if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
 766       return true;
 767     }
 768     return false;
 769   }
 770   if (t_oop->is_known_instance()) {


 779       Node* proj = proj_out_or_null(TypeFunc::Parms);
 780       if ((proj == NULL) || (phase->type(proj)->is_instptr()->klass() != boxing_klass)) {
 781         return false;
 782       }
 783     }
 784     if (is_CallJava() && as_CallJava()->method() != NULL) {
 785       ciMethod* meth = as_CallJava()->method();
 786       if (meth->is_getter()) {
 787         return false;
 788       }
 789       // May modify (by reflection) if a boxing object is passed
 790       // as an argument or returned.
 791       Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : NULL;
 792       if (proj != NULL) {
 793         const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
 794         if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
 795                                  (inst_t->klass() == boxing_klass))) {
 796           return true;
 797         }
 798       }
 799       const TypeTuple* d = tf()->domain();
 800       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 801         const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
 802         if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
 803                                  (inst_t->klass() == boxing_klass))) {
 804           return true;
 805         }
 806       }
 807       return false;
 808     }
 809   }
 810   return true;
 811 }
 812 
 813 // Does this call have a direct reference to n other than debug information?
 814 bool CallNode::has_non_debug_use(Node *n) {
 815   const TypeTuple * d = tf()->domain();
 816   for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 817     Node *arg = in(i);
 818     if (arg == n) {
 819       return true;
 820     }
 821   }
 822   return false;
 823 }
 824 
 825 // Returns the unique CheckCastPP of a call
 826 // or 'this' if there are several CheckCastPPs or unexpected uses,
 827 // or NULL if there is none.
 828 Node *CallNode::result_cast() {
 829   Node *cast = NULL;
 830 
 831   Node *p = proj_out_or_null(TypeFunc::Parms);
 832   if (p == NULL)
 833     return NULL;
 834 
 835   for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
 836     Node *use = p->fast_out(i);
 837     if (use->is_CheckCastPP()) {
 838       if (cast != NULL) {
 839         return this;  // more than 1 CheckCastPP
 840       }
 841       cast = use;
 842     } else if (!use->is_Initialize() &&
 843                !use->is_AddP() &&
 844                use->Opcode() != Op_MemBarStoreStore) {
 845       // Expected uses are restricted to a CheckCastPP, an Initialize
 846       // node, a MemBarStoreStore (clone) and AddP nodes. If we
 847       // encounter any other use (a Phi node can be seen in rare
 848       // cases) return this to prevent incorrect optimizations.
 849       return this;
 850     }
 851   }
 852   return cast;
 853 }
 854 
 855 
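      // Fill in the given CallProjections with this call's control, I/O,
      // memory and result projections, where present.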
 856 void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) {
 857   projs->fallthrough_proj      = NULL;
 858   projs->fallthrough_catchproj = NULL;
 859   projs->fallthrough_ioproj    = NULL;
 860   projs->catchall_ioproj       = NULL;
 861   projs->catchall_catchproj    = NULL;
 862   projs->fallthrough_memproj   = NULL;
 863   projs->catchall_memproj      = NULL;
 864   projs->resproj               = NULL;
 865   projs->exobj                 = NULL;
 866 
 867   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
 868     ProjNode *pn = fast_out(i)->as_Proj();
 869     if (pn->outcnt() == 0) continue;
 870     switch (pn->_con) {
 871     case TypeFunc::Control:
 872       {
 873         // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
 874         projs->fallthrough_proj = pn;
 875         DUIterator_Fast jmax, j = pn->fast_outs(jmax);
 876         const Node *cn = pn->fast_out(j);
 877         if (cn->is_Catch()) {
 878           ProjNode *cpn = NULL;
 879           for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
 880             cpn = cn->fast_out(k)->as_Proj();
 881             assert(cpn->is_CatchProj(), "must be a CatchProjNode");
 882             if (cpn->_con == CatchProjNode::fall_through_index)
 883               projs->fallthrough_catchproj = cpn;
 884             else {
 885               assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");


 892     case TypeFunc::I_O:
 893       if (pn->_is_io_use)
 894         projs->catchall_ioproj = pn;
 895       else
 896         projs->fallthrough_ioproj = pn;
 897       for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
 898         Node* e = pn->out(j);
 899         if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
 900           assert(projs->exobj == NULL, "only one");
 901           projs->exobj = e;
 902         }
 903       }
 904       break;
 905     case TypeFunc::Memory:
 906       if (pn->_is_io_use)
 907         projs->catchall_memproj = pn;
 908       else
 909         projs->fallthrough_memproj = pn;
 910       break;
 911     case TypeFunc::Parms:
 912       projs->resproj = pn;
 913       break;
 914     default:
 915       assert(false, "unexpected projection from allocation node.");
 916     }
 917   }
 918 
 919   // The resproj may not exist because the result could be ignored,
 920   // and the exception object may not exist if an exception handler
 921   // swallows the exception, but all the others must exist and be found.
 922   assert(projs->fallthrough_proj      != NULL, "must be found");
 923   do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
 924   assert(!do_asserts || projs->fallthrough_catchproj != NULL, "must be found");
 925   assert(!do_asserts || projs->fallthrough_memproj   != NULL, "must be found");
 926   assert(!do_asserts || projs->fallthrough_ioproj    != NULL, "must be found");
 927   assert(!do_asserts || projs->catchall_catchproj    != NULL, "must be found");
 928   if (separate_io_proj) {
 929     assert(!do_asserts || projs->catchall_memproj    != NULL, "must be found");
 930     assert(!do_asserts || projs->catchall_ioproj     != NULL, "must be found");
 931   }
 932 }
 933 
 934 Node *CallNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 935   CallGenerator* cg = generator();
 936   if (can_reshape && cg != NULL && cg->is_mh_late_inline() && !cg->already_attempted()) {
 937     // Check whether this method handle call becomes a candidate for inlining
 938     ciMethod* callee = cg->method();
 939     vmIntrinsics::ID iid = callee->intrinsic_id();
 940     if (iid == vmIntrinsics::_invokeBasic) {
 941       if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
 942         phase->C->prepend_late_inline(cg);
 943         set_generator(NULL);
 944       }
 945     } else {
 946       assert(callee->has_member_arg(), "wrong type of call?");
 947       if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
 948         phase->C->prepend_late_inline(cg);
 949         set_generator(NULL);
 950       }
 951     }


 955 
 956 bool CallNode::is_call_to_arraycopystub() const {
 957   if (_name != NULL && strstr(_name, "arraycopy") != 0) {
 958     return true;
 959   }
 960   return false;
 961 }
 962 
 963 //=============================================================================
 964 uint CallJavaNode::size_of() const { return sizeof(*this); }
 965 uint CallJavaNode::cmp( const Node &n ) const {
 966   CallJavaNode &call = (CallJavaNode&)n;
 967   return CallNode::cmp(call) && _method == call._method &&
 968          _override_symbolic_info == call._override_symbolic_info;
 969 }
 970 #ifdef ASSERT
 971 bool CallJavaNode::validate_symbolic_info() const {
 972   if (method() == NULL) {
 973     return true; // call into runtime or uncommon trap
 974   }




 975   ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(_bci);
 976   ciMethod* callee = method();
 977   if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
 978     assert(override_symbolic_info(), "should be set");
 979   }
 980   assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info");
 981   return true;
 982 }
 983 #endif
 984 
 985 #ifndef PRODUCT
 986 void CallJavaNode::dump_spec(outputStream *st) const {
 987   if( _method ) _method->print_short_name(st);
 988   CallNode::dump_spec(st);
 989 }
 990 
 991 void CallJavaNode::dump_compact_spec(outputStream* st) const {
 992   if (_method) {
 993     _method->print_short_name(st);
 994   } else {


1066   CallJavaNode::dump_spec(st);
1067 }
1068 #endif
1069 
1070 //=============================================================================
1071 uint CallRuntimeNode::size_of() const { return sizeof(*this); }
1072 uint CallRuntimeNode::cmp( const Node &n ) const {
1073   CallRuntimeNode &call = (CallRuntimeNode&)n;
1074   return CallNode::cmp(call) && !strcmp(_name,call._name);
1075 }
1076 #ifndef PRODUCT
1077 void CallRuntimeNode::dump_spec(outputStream *st) const {
1078   st->print("# ");
1079   st->print("%s", _name);
1080   CallNode::dump_spec(st);
1081 }
1082 #endif
1083 
1084 //------------------------------calling_convention-----------------------------
1085 void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
1086   Matcher::c_calling_convention( sig_bt, parm_regs, argcnt );
1087 }
1088 
1089 //=============================================================================
1090 //------------------------------calling_convention-----------------------------
1091 
1092 
1093 //=============================================================================
1094 #ifndef PRODUCT
1095 void CallLeafNode::dump_spec(outputStream *st) const {
1096   st->print("# ");
1097   st->print("%s", _name);
1098   CallNode::dump_spec(st);
1099 }
1100 #endif
1101 
1102 //=============================================================================
1103 
1104 void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
1105   assert(verify_jvms(jvms), "jvms must match");
1106   int loc = jvms->locoff() + idx;
1107   if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
1108     // If current local idx is top then local idx - 1 could
1109     // be a long/double that needs to be killed since top could
1110   // represent the 2nd half of the long/double.
1111     uint ideal = in(loc -1)->ideal_reg();
1112     if (ideal == Op_RegD || ideal == Op_RegL) {
1113       // set other (low index) half to top
1114       set_req(loc - 1, in(loc));
1115     }
1116   }
1117   set_req(loc, c);
1118 }
1119 
1120 uint SafePointNode::size_of() const { return sizeof(*this); }
1121 uint SafePointNode::cmp( const Node &n ) const {


1342   }
1343   SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
1344   sosn_map->Insert((void*)this, (void*)res);
1345   return res;
1346 }
1347 
1348 
1349 #ifndef PRODUCT
1350 void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
1351   st->print(" # fields@[%d..%d]", first_index(),
1352              first_index() + n_fields() - 1);
1353 }
1354 
1355 #endif
1356 
1357 //=============================================================================
1358 uint AllocateNode::size_of() const { return sizeof(*this); }
1359 
1360 AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
1361                            Node *ctrl, Node *mem, Node *abio,
1362                            Node *size, Node *klass_node, Node *initial_test)
1363   : CallNode(atype, NULL, TypeRawPtr::BOTTOM)
1364 {
1365   init_class_id(Class_Allocate);
1366   init_flags(Flag_is_macro);
1367   _is_scalar_replaceable = false;
1368   _is_non_escaping = false;
1369   _is_allocation_MemBar_redundant = false;
1370   Node *topnode = C->top();
1371 
1372   init_req( TypeFunc::Control  , ctrl );
1373   init_req( TypeFunc::I_O      , abio );
1374   init_req( TypeFunc::Memory   , mem );
1375   init_req( TypeFunc::ReturnAdr, topnode );
1376   init_req( TypeFunc::FramePtr , topnode );
1377   init_req( AllocSize          , size);
1378   init_req( KlassNode          , klass_node);
1379   init_req( InitialTest        , initial_test);
1380   init_req( ALength            , topnode);
1381   C->add_macro_node(this);
1382 }
1383 
1384 void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
1385 {
1386   assert(initializer != NULL &&
1387          initializer->is_initializer() &&
1388          !initializer->is_static(),
1389              "unexpected initializer method");
1390   BCEscapeAnalyzer* analyzer = initializer->get_bcea();
1391   if (analyzer == NULL) {
1392     return;
1393   }
1394 
1395   // The allocation node is the first parameter of its initializer
1396   if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
1397     _is_allocation_MemBar_redundant = true;
1398   }
1399 }
1400 
1401 //=============================================================================
1402 Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1403   if (remove_dead_region(phase, can_reshape))  return this;
1404   // Don't bother trying to transform a dead node
1405   if (in(0) && in(0)->is_top())  return NULL;
1406 
1407   const Type* type = phase->type(Ideal_length());
1408   if (type->isa_int() && type->is_int()->_hi < 0) {
1409     if (can_reshape) {
1410       PhaseIterGVN *igvn = phase->is_IterGVN();
1411       // Unreachable fall through path (negative array length),
1412       // the allocation can only throw so disconnect it.
1413       Node* proj = proj_out_or_null(TypeFunc::Control);
1414       Node* catchproj = NULL;
1415       if (proj != NULL) {
1416         for (DUIterator_Fast imax, i = proj->fast_outs(imax); i < imax; i++) {
1417           Node *cn = proj->fast_out(i);
1418           if (cn->is_Catch()) {
1419             catchproj = cn->as_Multi()->proj_out_or_null(CatchProjNode::fall_through_index);
1420             break;
1421           }
1422         }
1423       }


2058     }
2059     // unrelated
2060     return false;
2061   }
2062 
2063   if (dest_t->isa_aryptr()) {
2064     // arraycopy or array clone
2065     if (t_oop->isa_instptr()) {
2066       return false;
2067     }
2068     if (!t_oop->isa_aryptr()) {
2069       return true;
2070     }
2071 
2072     const Type* elem = dest_t->is_aryptr()->elem();
2073     if (elem == Type::BOTTOM) {
 2074       // An array, but we don't know what the elements are
2075       return true;
2076     }
2077 
 2078     dest_t = dest_t->add_offset(Type::OffsetBot)->is_oopptr();
2079     uint dest_alias = phase->C->get_alias_index(dest_t);
2080     uint t_oop_alias = phase->C->get_alias_index(t_oop);
2081 
2082     return dest_alias == t_oop_alias;
2083   }
2084 
2085   return true;
2086 }
2087 


  25 #include "precompiled.hpp"
  26 #include "compiler/compileLog.hpp"
  27 #include "ci/bcEscapeAnalyzer.hpp"
  28 #include "compiler/oopMap.hpp"
  29 #include "gc/shared/barrierSet.hpp"
  30 #include "gc/shared/c2/barrierSetC2.hpp"
  31 #include "interpreter/interpreter.hpp"
  32 #include "opto/callGenerator.hpp"
  33 #include "opto/callnode.hpp"
  34 #include "opto/castnode.hpp"
  35 #include "opto/convertnode.hpp"
  36 #include "opto/escape.hpp"
  37 #include "opto/locknode.hpp"
  38 #include "opto/machnode.hpp"
  39 #include "opto/matcher.hpp"
  40 #include "opto/parse.hpp"
  41 #include "opto/regalloc.hpp"
  42 #include "opto/regmask.hpp"
  43 #include "opto/rootnode.hpp"
  44 #include "opto/runtime.hpp"
  45 #include "opto/valuetypenode.hpp"
  46 #include "runtime/sharedRuntime.hpp"
  47 
  48 // Portions of code courtesy of Clifford Click
  49 
  50 // Optimization - Graph Style
  51 
  52 //=============================================================================
  53 uint StartNode::size_of() const { return sizeof(*this); }
  54 uint StartNode::cmp( const Node &n ) const
  55 { return _domain == ((StartNode&)n)._domain; }
  56 const Type *StartNode::bottom_type() const { return _domain; }
  57 const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; }
  58 #ifndef PRODUCT
  59 void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
  60 void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
  61 #endif
  62 
  63 //------------------------------Ideal------------------------------------------
  64 Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  65   return remove_dead_region(phase, can_reshape) ? this : NULL;
  66 }
  67 
  68 //------------------------------calling_convention-----------------------------
  69 void StartNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  70   Matcher::calling_convention( sig_bt, parm_regs, argcnt, false );
  71 }
  72 
  73 //------------------------------Registers--------------------------------------
  74 const RegMask &StartNode::in_RegMask(uint) const {
  75   return RegMask::Empty;
  76 }
  77 
  78 //------------------------------match------------------------------------------
  79 // Construct projections for incoming parameters, and their RegMask info
  80 Node *StartNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
  81   switch (proj->_con) {
  82   case TypeFunc::Control:
  83   case TypeFunc::I_O:
  84   case TypeFunc::Memory:
  85     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  86   case TypeFunc::FramePtr:
  87     return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  88   case TypeFunc::ReturnAdr:
  89     return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  90   case TypeFunc::Parms:
  91   default: {
  92       uint parm_num = proj->_con - TypeFunc::Parms;
  93       const Type *t = _domain->field_at(proj->_con);
  94       if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
  95         return new ConNode(Type::TOP);
  96       uint ideal_reg = t->ideal_reg();
  97       RegMask &rm = match->_calling_convention_mask[parm_num];
  98       return new MachProjNode(this,proj->_con,rm,ideal_reg);
  99     }
 100   }


 464       if (cik->is_instance_klass()) {
 465         cik->print_name_on(st);
 466         iklass = cik->as_instance_klass();
 467       } else if (cik->is_type_array_klass()) {
 468         cik->as_array_klass()->base_element_type()->print_name_on(st);
 469         st->print("[%d]", spobj->n_fields());
 470       } else if (cik->is_obj_array_klass()) {
 471         ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
 472         if (cie->is_instance_klass()) {
 473           cie->print_name_on(st);
 474         } else if (cie->is_type_array_klass()) {
 475           cie->as_array_klass()->base_element_type()->print_name_on(st);
 476         } else {
 477           ShouldNotReachHere();
 478         }
 479         st->print("[%d]", spobj->n_fields());
 480         int ndim = cik->as_array_klass()->dimension() - 1;
 481         while (ndim-- > 0) {
 482           st->print("[]");
 483         }
 484       } else if (cik->is_value_array_klass()) {
 485         ciKlass* cie = cik->as_value_array_klass()->base_element_klass();
 486         cie->print_name_on(st);
 487         st->print("[%d]", spobj->n_fields());
 488         int ndim = cik->as_array_klass()->dimension() - 1;
 489         while (ndim-- > 0) {
 490           st->print("[]");
 491         }
 492       }
 493       st->print("={");
 494       uint nf = spobj->n_fields();
 495       if (nf > 0) {
 496         uint first_ind = spobj->first_index(mcall->jvms());
 497         Node* fld_node = mcall->in(first_ind);
 498         ciField* cifield;
 499         if (iklass != NULL) {
 500           st->print(" [");
 501           cifield = iklass->nonstatic_field_at(0);
 502           cifield->print_name_on(st);
 503           format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
 504         } else {
 505           format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
 506         }
 507         for (uint j = 1; j < nf; j++) {
 508           fld_node = mcall->in(first_ind+j);
 509           if (iklass != NULL) {
 510             st->print(", [");
 511             cifield = iklass->nonstatic_field_at(j);


 681 #ifndef PRODUCT
 682 void CallNode::dump_req(outputStream *st) const {
 683   // Dump the required inputs, enclosed in '(' and ')'
 684   uint i;                       // Exit value of loop
 685   for (i = 0; i < req(); i++) {    // For all required inputs
 686     if (i == TypeFunc::Parms) st->print("(");
 687     if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
 688     else st->print("_ ");
 689   }
 690   st->print(")");
 691 }
 692 
 693 void CallNode::dump_spec(outputStream *st) const {
 694   st->print(" ");
 695   if (tf() != NULL)  tf()->dump_on(st);
 696   if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
 697   if (jvms() != NULL)  jvms()->dump_spec(st);
 698 }
 699 #endif
 700 
 701 const Type *CallNode::bottom_type() const { return tf()->range_cc(); }
 702 const Type* CallNode::Value(PhaseGVN* phase) const {
 703   if (!in(0) || phase->type(in(0)) == Type::TOP) {
 704     return Type::TOP;
 705   }
 706   return tf()->range_cc();
 707 }
 708 
 709 //------------------------------calling_convention-----------------------------
 710 void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
 711   if (_entry_point == StubRoutines::store_value_type_fields_to_buf()) {
 712     // The call to that stub is a special case: its inputs are
 713     // multiple values returned from a call and so it should follow
 714     // the return convention.
 715     SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
 716     return;
 717   }
 718   // Use the standard compiler calling convention
 719   Matcher::calling_convention( sig_bt, parm_regs, argcnt, true );
 720 }
 721 
 722 
 723 //------------------------------match------------------------------------------
 724 // Construct projections for control, I/O, memory-fields, ..., and
 725 // return result(s) along with their RegMask info
 726 Node *CallNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
 727   uint con = proj->_con;
 728   const TypeTuple *range_cc = tf()->range_cc();
 729   if (con >= TypeFunc::Parms) {
 730     if (is_CallRuntime()) {
 731       if (con == TypeFunc::Parms) {
 732         uint ideal_reg = range_cc->field_at(TypeFunc::Parms)->ideal_reg();
 733         OptoRegPair regs = match->c_return_value(ideal_reg,true);
 734         RegMask rm = RegMask(regs.first());
 735         if (OptoReg::is_valid(regs.second())) {
 736           rm.Insert(regs.second());
 737         }
 738         return new MachProjNode(this,con,rm,ideal_reg);
 739       } else {
 740         assert(con == TypeFunc::Parms+1, "only one return value");
 741         assert(range_cc->field_at(TypeFunc::Parms+1) == Type::HALF, "");
 742         return new MachProjNode(this,con, RegMask::Empty, (uint)OptoReg::Bad);
 743       }
 744     } else {
 745       // The Call may return multiple values (value type fields): we
 746       // create one projection per returned value.
 747       assert(con <= TypeFunc::Parms+1 || ValueTypeReturnedAsFields, "only for multi value return");
 748       uint ideal_reg = range_cc->field_at(con)->ideal_reg();
 749       return new MachProjNode(this, con, mask[con-TypeFunc::Parms], ideal_reg);
 750     }
 751   }
 752 
 753   switch (con) {
 754   case TypeFunc::Control:
 755   case TypeFunc::I_O:
 756   case TypeFunc::Memory:
 757     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
 758 
 759   case TypeFunc::ReturnAdr:
 760   case TypeFunc::FramePtr:
 761   default:
 762     ShouldNotReachHere();
 763   }
 764   return NULL;
 765 }
 766 
 767 // Do we Match on this edge index or not?  Match no edges
 768 uint CallNode::match_edge(uint idx) const {
 769   return 0;
 770 }
 771 
 772 //
 773 // Determine whether the call could modify the field of the specified
 774 // instance at the specified offset.
 775 //
 776 bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
 777   assert((t_oop != NULL), "sanity");
 778   if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
 779     const TypeTuple* args = _tf->domain_sig();
 780     Node* dest = NULL;
 781     // Stubs that can be called once an ArrayCopyNode is expanded have
 782     // different signatures. Look for the second pointer argument,
 783     // which is the destination of the copy.
 784     for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
 785       if (args->field_at(i)->isa_ptr()) {
 786         j++;
 787         if (j == 2) {
 788           dest = in(i);
 789           break;
 790         }
 791       }
 792     }
 793     guarantee(dest != NULL, "Call had only one ptr in, broken IR!");
 794     if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
 795       return true;
 796     }
 797     return false;
 798   }
 799   if (t_oop->is_known_instance()) {


 808       Node* proj = proj_out_or_null(TypeFunc::Parms);
 809       if ((proj == NULL) || (phase->type(proj)->is_instptr()->klass() != boxing_klass)) {
 810         return false;
 811       }
 812     }
 813     if (is_CallJava() && as_CallJava()->method() != NULL) {
 814       ciMethod* meth = as_CallJava()->method();
 815       if (meth->is_getter()) {
 816         return false;
 817       }
 818       // May modify (by reflection) if a boxing object is passed
 819       // as an argument or returned.
 820       Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : NULL;
 821       if (proj != NULL) {
 822         const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
 823         if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
 824                                  (inst_t->klass() == boxing_klass))) {
 825           return true;
 826         }
 827       }
 828       const TypeTuple* d = tf()->domain_cc();
 829       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 830         const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
 831         if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
 832                                  (inst_t->klass() == boxing_klass))) {
 833           return true;
 834         }
 835       }
 836       return false;
 837     }
 838   }
 839   return true;
 840 }
 841 
 842 // Does this call have a direct reference to n other than debug information?
 843 bool CallNode::has_non_debug_use(Node *n) {
 844   const TypeTuple * d = tf()->domain_cc();
 845   for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
 846     Node *arg = in(i);
 847     if (arg == n) {
 848       return true;
 849     }
 850   }
 851   return false;
 852 }
 853 
 854 bool CallNode::has_debug_use(Node *n) {
 855   assert(jvms() != NULL, "jvms should not be null");
 856   for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
 857     Node *arg = in(i);
 858     if (arg == n) {
 859       return true;
 860     }
 861   }
 862   return false;
 863 }
 864 
 865 // Returns the unique CheckCastPP of a call
 866 // or 'this' if there are several CheckCastPPs or unexpected uses,
 867 // or NULL if there is none.
 868 Node *CallNode::result_cast() {
 869   Node *cast = NULL;
 870 
 871   Node *p = proj_out_or_null(TypeFunc::Parms);
 872   if (p == NULL)
 873     return NULL;
 874 
 875   for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
 876     Node *use = p->fast_out(i);
 877     if (use->is_CheckCastPP()) {
 878       if (cast != NULL) {
 879         return this;  // more than 1 CheckCastPP
 880       }
 881       cast = use;
 882     } else if (!use->is_Initialize() &&
 883                !use->is_AddP() &&
 884                use->Opcode() != Op_MemBarStoreStore) {
 885       // Expected uses are restricted to a CheckCastPP, an Initialize
 886       // node, a MemBarStoreStore (clone) and AddP nodes. If we
 887       // encounter any other use (a Phi node can be seen in rare
 888       // cases) return this to prevent incorrect optimizations.
 889       return this;
 890     }
 891   }
 892   return cast;
 893 }
 894 
 895 
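      // Collect this call's control, I/O, memory and result projections into
      // a freshly resource-allocated CallProjections; callers read it but
      // never free it (it lives in the resource area).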
 896 CallProjections* CallNode::extract_projections(bool separate_io_proj, bool do_asserts) {
 897   uint max_res = TypeFunc::Parms-1;
 898   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
 899     ProjNode *pn = fast_out(i)->as_Proj();
 900     max_res = MAX2(max_res, pn->_con);
 901   }
 902 
 903   assert(max_res < _tf->range_cc()->cnt(), "result out of bounds");
 904 
 905   uint projs_size = sizeof(CallProjections);
 906   if (max_res > TypeFunc::Parms) {
 907     projs_size += (max_res-TypeFunc::Parms)*sizeof(Node*);
 908   }
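        // Over-allocate so that the trailing resproj array has one slot per
        // returned value: with value types a call may return multiple values.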
 909   char* projs_storage = resource_allocate_bytes(projs_size);
 910   CallProjections* projs = new(projs_storage)CallProjections(max_res - TypeFunc::Parms + 1);
 911 
 912   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
 913     ProjNode *pn = fast_out(i)->as_Proj();
 914     if (pn->outcnt() == 0) continue;
 915     switch (pn->_con) {
 916     case TypeFunc::Control:
 917       {
 918         // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
 919         projs->fallthrough_proj = pn;
 920         DUIterator_Fast jmax, j = pn->fast_outs(jmax);
 921         const Node *cn = pn->fast_out(j);
 922         if (cn->is_Catch()) {
 923           ProjNode *cpn = NULL;
 924           for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
 925             cpn = cn->fast_out(k)->as_Proj();
 926             assert(cpn->is_CatchProj(), "must be a CatchProjNode");
 927             if (cpn->_con == CatchProjNode::fall_through_index)
 928               projs->fallthrough_catchproj = cpn;
 929             else {
 930               assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");


 937     case TypeFunc::I_O:
 938       if (pn->_is_io_use)
 939         projs->catchall_ioproj = pn;
 940       else
 941         projs->fallthrough_ioproj = pn;
 942       for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
 943         Node* e = pn->out(j);
 944         if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
 945           assert(projs->exobj == NULL, "only one");
 946           projs->exobj = e;
 947         }
 948       }
 949       break;
 950     case TypeFunc::Memory:
 951       if (pn->_is_io_use)
 952         projs->catchall_memproj = pn;
 953       else
 954         projs->fallthrough_memproj = pn;
 955       break;
 956     case TypeFunc::Parms:
 957       projs->resproj[0] = pn;
 958       break;
 959     default:
 960       assert(pn->_con <= max_res, "unexpected projection from allocation node.");
 961       projs->resproj[pn->_con-TypeFunc::Parms] = pn;
 962       break;
 963     }
 964   }
 965 
 966   // The resproj may not exist because the result could be ignored,
 967   // and the exception object may not exist if an exception handler
 968   // swallows the exception, but all the others must exist and be found.
 969   assert(projs->fallthrough_proj      != NULL, "must be found");
 970   do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
 971   assert(!do_asserts || projs->fallthrough_catchproj != NULL, "must be found");
 972   assert(!do_asserts || projs->fallthrough_memproj   != NULL, "must be found");
 973   assert(!do_asserts || projs->fallthrough_ioproj    != NULL, "must be found");
 974   assert(!do_asserts || projs->catchall_catchproj    != NULL, "must be found");
 975   if (separate_io_proj) {
 976     assert(!do_asserts || projs->catchall_memproj    != NULL, "must be found");
 977     assert(!do_asserts || projs->catchall_ioproj     != NULL, "must be found");
 978   }
 979   return projs;
 980 }
 981 
 982 Node *CallNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 983   CallGenerator* cg = generator();
 984   if (can_reshape && cg != NULL && cg->is_mh_late_inline() && !cg->already_attempted()) {
 985     // Check whether this method handle call becomes a candidate for inlining
 986     ciMethod* callee = cg->method();
 987     vmIntrinsics::ID iid = callee->intrinsic_id();
 988     if (iid == vmIntrinsics::_invokeBasic) {
 989       if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
 990         phase->C->prepend_late_inline(cg);
 991         set_generator(NULL);
 992       }
 993     } else {
 994       assert(callee->has_member_arg(), "wrong type of call?");
 995       if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
 996         phase->C->prepend_late_inline(cg);
 997         set_generator(NULL);
 998       }
 999     }


1003 
1004 bool CallNode::is_call_to_arraycopystub() const {
1005   if (_name != NULL && strstr(_name, "arraycopy") != 0) {
1006     return true;
1007   }
1008   return false;
1009 }
1010 
1011 //=============================================================================
1012 uint CallJavaNode::size_of() const { return sizeof(*this); }
1013 uint CallJavaNode::cmp( const Node &n ) const {
1014   CallJavaNode &call = (CallJavaNode&)n;
1015   return CallNode::cmp(call) && _method == call._method &&
1016          _override_symbolic_info == call._override_symbolic_info;
1017 }
1018 #ifdef ASSERT
1019 bool CallJavaNode::validate_symbolic_info() const {
1020   if (method() == NULL) {
1021     return true; // call into runtime or uncommon trap
1022   }
1023   Bytecodes::Code bc = jvms()->method()->java_code_at_bci(_bci);
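       // With ACmpOnValues == 3, an acmp bytecode may be compiled into a call
       // that has no symbolic call site of its own at this bci, so skip the
       // consistency check in that case.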
1024   if (ACmpOnValues == 3 && (bc == Bytecodes::_if_acmpeq || bc == Bytecodes::_if_acmpne)) {
1025     return true;
1026   }
1027   ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(_bci);
1028   ciMethod* callee = method();
1029   if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
1030     assert(override_symbolic_info(), "should be set");
1031   }
1032   assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info");
1033   return true;
1034 }
1035 #endif
1036 
1037 #ifndef PRODUCT
1038 void CallJavaNode::dump_spec(outputStream *st) const {
1039   if( _method ) _method->print_short_name(st);
1040   CallNode::dump_spec(st);
1041 }
1042 
1043 void CallJavaNode::dump_compact_spec(outputStream* st) const {
1044   if (_method) {
1045     _method->print_short_name(st);
1046   } else {


1118   CallJavaNode::dump_spec(st);
1119 }
1120 #endif
1121 
1122 //=============================================================================
1123 uint CallRuntimeNode::size_of() const { return sizeof(*this); }
1124 uint CallRuntimeNode::cmp( const Node &n ) const {
1125   CallRuntimeNode &call = (CallRuntimeNode&)n;
1126   return CallNode::cmp(call) && !strcmp(_name,call._name);
1127 }
1128 #ifndef PRODUCT
1129 void CallRuntimeNode::dump_spec(outputStream *st) const {
1130   st->print("# ");
1131   st->print("%s", _name);
1132   CallNode::dump_spec(st);
1133 }
1134 #endif
1135 
1136 //------------------------------calling_convention-----------------------------
1137 void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
1138   if (_entry_point == NULL) {
1139     // A call with a null entry point is a special case: its inputs are
1140     // multiple values returned from a call and so it should follow
1141     // the return convention.
1142     SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
1143     return;
1144   }
1145   Matcher::c_calling_convention( sig_bt, parm_regs, argcnt );
1146 }
1147 
1148 //=============================================================================
1149 //------------------------------calling_convention-----------------------------
1150 
1151 
1152 //=============================================================================
1153 #ifndef PRODUCT
1154 void CallLeafNode::dump_spec(outputStream *st) const {
1155   st->print("# ");
1156   st->print("%s", _name);
1157   CallNode::dump_spec(st);
1158 }
1159 #endif
1160 
1161 uint CallLeafNoFPNode::match_edge(uint idx) const {
1162   // Null entry point is a special case for which the target is in a
1163   // register. Need to match that edge.
1164   return entry_point() == NULL && idx == TypeFunc::Parms;
1165 }
1166 
1167 //=============================================================================
1168 
1169 void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
1170   assert(verify_jvms(jvms), "jvms must match");
1171   int loc = jvms->locoff() + idx;
1172   if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
1173     // If current local idx is top then local idx - 1 could
1174     // be a long/double that needs to be killed since top could
1175   // represent the 2nd half of the long/double.
1176     uint ideal = in(loc -1)->ideal_reg();
1177     if (ideal == Op_RegD || ideal == Op_RegL) {
1178       // set other (low index) half to top
1179       set_req(loc - 1, in(loc));
1180     }
1181   }
1182   set_req(loc, c);
1183 }
1184 
1185 uint SafePointNode::size_of() const { return sizeof(*this); }
1186 uint SafePointNode::cmp( const Node &n ) const {


1407   }
1408   SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
1409   sosn_map->Insert((void*)this, (void*)res);
1410   return res;
1411 }
1412 
1413 
1414 #ifndef PRODUCT
1415 void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
1416   st->print(" # fields@[%d..%d]", first_index(),
1417              first_index() + n_fields() - 1);
1418 }
1419 
1420 #endif
1421 
1422 //=============================================================================
1423 uint AllocateNode::size_of() const { return sizeof(*this); }
1424 
1425 AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
1426                            Node *ctrl, Node *mem, Node *abio,
1427                            Node *size, Node *klass_node,
1428                            Node* initial_test,
1429                            ValueTypeBaseNode* value_node)
1430   : CallNode(atype, NULL, TypeRawPtr::BOTTOM)
1431 {
1432   init_class_id(Class_Allocate);
1433   init_flags(Flag_is_macro);
1434   _is_scalar_replaceable = false;
1435   _is_non_escaping = false;
1436   _is_allocation_MemBar_redundant = false;
1437   _larval = false;
1438   Node *topnode = C->top();
1439 
1440   init_req( TypeFunc::Control  , ctrl );
1441   init_req( TypeFunc::I_O      , abio );
1442   init_req( TypeFunc::Memory   , mem );
1443   init_req( TypeFunc::ReturnAdr, topnode );
1444   init_req( TypeFunc::FramePtr , topnode );
1445   init_req( AllocSize          , size);
1446   init_req( KlassNode          , klass_node);
1447   init_req( InitialTest        , initial_test);
1448   init_req( ALength            , topnode);
1449   init_req( ValueNode          , value_node);
1450   // DefaultValue defaults to NULL
1451   // RawDefaultValue defaults to NULL
1452   C->add_macro_node(this);
1453 }
1454 
1455 void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
1456 {
1457   assert(initializer != NULL &&
1458          initializer->is_initializer() &&
1459          !initializer->is_static(),
1460              "unexpected initializer method");
1461   BCEscapeAnalyzer* analyzer = initializer->get_bcea();
1462   if (analyzer == NULL) {
1463     return;
1464   }
1465 
1466   // The allocation node is the first parameter of its initializer
1467   if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
1468     _is_allocation_MemBar_redundant = true;
1469   }
1470 }
1471 
1472 Node* AllocateNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1473   // Check for unused value type allocation
1474   if (can_reshape && in(AllocateNode::ValueNode) != NULL &&
1475       outcnt() != 0 && result_cast() == NULL) {
1476     // Remove the allocation by replacing its projection nodes with the allocation's inputs
1477     InitializeNode* init = initialization();
1478     PhaseIterGVN* igvn = phase->is_IterGVN();
1479     CallProjections* projs = extract_projections(true, false);
1480     assert(projs->nb_resproj <= 1, "unexpected number of results");
1481     if (projs->fallthrough_catchproj != NULL) {
1482       igvn->replace_node(projs->fallthrough_catchproj, in(TypeFunc::Control));
1483     }
1484     if (projs->fallthrough_memproj != NULL) {
1485       igvn->replace_node(projs->fallthrough_memproj, in(TypeFunc::Memory));
1486     }
1487     if (projs->catchall_memproj != NULL) {
1488       igvn->replace_node(projs->catchall_memproj, phase->C->top());
1489     }
1490     if (projs->fallthrough_ioproj != NULL) {
1491       igvn->replace_node(projs->fallthrough_ioproj, in(TypeFunc::I_O));
1492     }
1493     if (projs->catchall_ioproj != NULL) {
1494       igvn->replace_node(projs->catchall_ioproj, phase->C->top());
1495     }
1496     if (projs->catchall_catchproj != NULL) {
1497       igvn->replace_node(projs->catchall_catchproj, phase->C->top());
1498     }
1499     if (projs->resproj[0] != NULL) {
1500       igvn->replace_node(projs->resproj[0], phase->C->top());
1501     }
1502     igvn->replace_node(this, phase->C->top());
1503     if (init != NULL) {
1504       Node* ctrl_proj = init->proj_out_or_null(TypeFunc::Control);
1505       Node* mem_proj = init->proj_out_or_null(TypeFunc::Memory);
1506       if (ctrl_proj != NULL) {
1507         igvn->replace_node(ctrl_proj, init->in(TypeFunc::Control));
1508       }
1509       if (mem_proj != NULL) {
1510         igvn->replace_node(mem_proj, init->in(TypeFunc::Memory));
1511       }
1512     }
1513     return NULL;
1514   }
1515 
1516   return CallNode::Ideal(phase, can_reshape);
1517 }
1518 
1519 Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem, Node* klass_node) {
1520   Node* mark_node = NULL;
1521   // For now only enable fast locking for non-array types
1522   if ((EnableValhalla || UseBiasedLocking) && Opcode() == Op_Allocate) {
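          // Load the prototype mark word from the klass rather than using a
          // constant, since it can differ per klass (e.g. with biased locking).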
1523     if (klass_node == NULL) {
1524       Node* k_adr = phase->transform(new AddPNode(obj, obj, phase->MakeConX(oopDesc::klass_offset_in_bytes())));
1525       klass_node = phase->transform(LoadKlassNode::make(*phase, NULL, phase->C->immutable_memory(), k_adr, phase->type(k_adr)->is_ptr()));
1526     }
1527     Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
1528     mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
1529   } else {
1530     mark_node = phase->MakeConX((intptr_t)markOopDesc::prototype());
1531   }
1532   mark_node = phase->transform(mark_node);
1533   // Avoid returning a constant (old node) here because this method is used by LoadNode::Ideal
1534   return new OrXNode(mark_node, phase->MakeConX(_larval ? markOopDesc::larval_state_pattern : 0));
1535 }
1536 
1537 
1538 //=============================================================================
1539 Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1540   Node* res = SafePointNode::Ideal(phase, can_reshape);
1541   if (res != NULL) {
1542     return res;
1543   }
1544   // Don't bother trying to transform a dead node
1545   if (in(0) && in(0)->is_top())  return NULL;
1546 
1547   const Type* type = phase->type(Ideal_length());
1548   if (type->isa_int() && type->is_int()->_hi < 0) {
1549     if (can_reshape) {
1550       PhaseIterGVN *igvn = phase->is_IterGVN();
1551       // Unreachable fall through path (negative array length),
1552       // the allocation can only throw so disconnect it.
1553       Node* proj = proj_out_or_null(TypeFunc::Control);
1554       Node* catchproj = NULL;
1555       if (proj != NULL) {
1556         for (DUIterator_Fast imax, i = proj->fast_outs(imax); i < imax; i++) {
1557           Node *cn = proj->fast_out(i);
1558           if (cn->is_Catch()) {
1559             catchproj = cn->as_Multi()->proj_out_or_null(CatchProjNode::fall_through_index);
1560             break;
1561           }
1562         }
1563       }


2198     }
2199     // unrelated
2200     return false;
2201   }
2202 
2203   if (dest_t->isa_aryptr()) {
2204     // arraycopy or array clone
2205     if (t_oop->isa_instptr()) {
2206       return false;
2207     }
2208     if (!t_oop->isa_aryptr()) {
2209       return true;
2210     }
2211 
2212     const Type* elem = dest_t->is_aryptr()->elem();
2213     if (elem == Type::BOTTOM) {
2214       // An array, but we don't know what the elements are
2215       return true;
2216     }
2217 
2218     dest_t = dest_t->is_aryptr()->with_field_offset(Type::OffsetBot)->add_offset(Type::OffsetBot)->is_oopptr();
2219     t_oop = t_oop->is_aryptr()->with_field_offset(Type::OffsetBot);
2220     uint dest_alias = phase->C->get_alias_index(dest_t);
2221     uint t_oop_alias = phase->C->get_alias_index(t_oop);
2222 
2223     return dest_alias == t_oop_alias;
2224   }
2225 
2226   return true;
2227 }
2228 