src/share/vm/opto/macro.cpp

--- old version ---

 649           if (use->Opcode() == Op_Return) {
 650             NOT_PRODUCT(fail_eliminate = "Object is return value";)
 651           } else {
 652             NOT_PRODUCT(fail_eliminate = "Object is referenced by node";)
 653           }
 654           DEBUG_ONLY(disq_node = use;)
 655         }
 656         can_eliminate = false;
 657       }
 658     }
 659   }
 660 
 661 #ifndef PRODUCT
 662   if (PrintEliminateAllocations) {
 663     if (can_eliminate) {
 664       tty->print("Scalar ");
 665       if (res == NULL)
 666         alloc->dump();
 667       else
 668         res->dump();
 669     } else {
 670       tty->print("NotScalar (%s)", fail_eliminate);
 671       if (res == NULL)
 672         alloc->dump();
 673       else
 674         res->dump();
 675 #ifdef ASSERT
 676       if (disq_node != NULL) {
 677           tty->print("  >>>> ");
 678           disq_node->dump();
 679       }
 680 #endif /*ASSERT*/
 681     }
 682   }
 683 #endif
 684   return can_eliminate;
 685 }
 686 
 687 // Do scalar replacement.
 688 bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) {
 689   GrowableArray <SafePointNode *> safepoints_done;


 828 #endif
 829         return false;
 830       }
 831       if (UseCompressedOops && field_type->isa_narrowoop()) {
 832         // Enable "DecodeN(EncodeP(Allocate)) --> Allocate" transformation
 833         // to be able to scalar replace the allocation.
 834         if (field_val->is_EncodeP()) {
 835           field_val = field_val->in(1);
 836         } else {
 837           field_val = transform_later(new (C) DecodeNNode(field_val, field_val->bottom_type()->make_ptr()));
 838         }
 839       }
 840       sfpt->add_req(field_val);
 841     }
 842     JVMState *jvms = sfpt->jvms();
 843     jvms->set_endoff(sfpt->req());
 844     // Now make a pass over the debug information replacing any references
 845     // to the allocated object with "sobj"
 846     int start = jvms->debug_start();
 847     int end   = jvms->debug_end();
 848     for (int i = start; i < end; i++) {
 849       if (sfpt->in(i) == res) {
 850         sfpt->set_req(i, sobj);
 851       }
 852     }
 853     safepoints_done.append_if_missing(sfpt); // keep it for rollback
 854   }
 855   return true;
 856 }
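
The narrow-oop branch above (lines 831-838) keeps debug info usable when compressed oops are enabled: if the stored value is already EncodeP(x), the safepoint can reference x directly; otherwise a DecodeN is materialized so the safepoint sees an uncompressed pointer. A standalone sketch of that choice, with illustrative stand-in types (nothing below is HotSpot API):

    #include <cstdio>

    // Minimal stand-ins; real ideal-graph nodes are far richer.
    struct Node {
      bool  is_encode_p;
      Node* in1;                  // input of an EncodeP node
    };

    // Mirrors the branch above: strip an EncodeP when present; otherwise a
    // DecodeN would be created (here we only report that it would be).
    Node* debug_value_for(Node* field_val, bool field_is_narrowoop) {
      if (!field_is_narrowoop) return field_val;
      if (field_val->is_encode_p) {
        return field_val->in1;    // DecodeN(EncodeP(x)) --> x
      }
      std::puts("would materialize DecodeN(field_val)");
      return field_val;
    }

    int main() {
      Node base = {false, nullptr};
      Node enc  = {true, &base};
      return debug_value_for(&enc, true) == &base ? 0 : 1;
    }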
 857 
 858 // Process users of eliminated allocation.
 859 void PhaseMacroExpand::process_users_of_allocation(AllocateNode *alloc) {
 860   Node* res = alloc->result_cast();
 861   if (res != NULL) {
 862     for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) {
 863       Node *use = res->last_out(j);
 864       uint oc1 = res->outcnt();
 865 
 866       if (use->is_AddP()) {
 867         for (DUIterator_Last kmin, k = use->last_outs(kmin); k >= kmin; ) {
 868           Node *n = use->last_out(k);
 869           uint oc2 = use->outcnt();
 870           if (n->is_Store()) {
 871 #ifdef ASSERT
 872             // Verify that there are no dependent MemBarVolatile nodes;
 873             // they should be removed during IGVN, see MemBarNode::Ideal().
 874             for (DUIterator_Fast pmax, p = n->fast_outs(pmax);
 875                                        p < pmax; p++) {
 876               Node* mb = n->fast_out(p);
 877               assert(mb->is_Initialize() || !mb->is_MemBar() ||
 878                      mb->req() <= MemBarNode::Precedent ||
 879                      mb->in(MemBarNode::Precedent) != n,


 882 #endif
 883             _igvn.replace_node(n, n->in(MemNode::Memory));
 884           } else {
 885             eliminate_card_mark(n);
 886           }
 887           k -= (oc2 - use->outcnt());
 888         }
 889       } else {
 890         eliminate_card_mark(use);
 891       }
 892       j -= (oc1 - res->outcnt());
 893     }
 894     assert(res->outcnt() == 0, "all uses of allocated objects must be deleted");
 895     _igvn.remove_dead_node(res);
 896   }
 897 
 898   //
 899   // Process other users of allocation's projections
 900   //
 901   if (_resproj != NULL && _resproj->outcnt() != 0) {
 902     for (DUIterator_Last jmin, j = _resproj->last_outs(jmin); j >= jmin; ) {
 903       Node *use = _resproj->last_out(j);
 904       uint oc1 = _resproj->outcnt();
 905       if (use->is_Initialize()) {
 906         // Eliminate Initialize node.
 907         InitializeNode *init = use->as_Initialize();
 908         assert(init->outcnt() <= 2, "only a control and memory projection expected");
 909         Node *ctrl_proj = init->proj_out(TypeFunc::Control);
 910         if (ctrl_proj != NULL) {
 911           assert(init->in(TypeFunc::Control) == _fallthroughcatchproj, "allocation control projection");
 912           _igvn.replace_node(ctrl_proj, _fallthroughcatchproj);
 913         }
 914         Node *mem_proj = init->proj_out(TypeFunc::Memory);
 915         if (mem_proj != NULL) {
 916           Node *mem = init->in(TypeFunc::Memory);
 917 #ifdef ASSERT
 918           if (mem->is_MergeMem()) {
 919             assert(mem->in(TypeFunc::Memory) == _memproj_fallthrough, "allocation memory projection");
 920           } else {
 921             assert(mem == _memproj_fallthrough, "allocation memory projection");
 922           }
 923 #endif
 924           _igvn.replace_node(mem_proj, mem);
 925         }
 926       } else if (use->is_AddP()) {
 927         // raw memory addresses used only by the initialization
 928         _igvn.replace_node(use, C->top());
 929       } else {
 930         assert(false, "only Initialize or AddP expected");
 931       }
 932       j -= (oc1 - _resproj->outcnt());
 933     }
 934   }
 935   if (_fallthroughcatchproj != NULL) {
 936     _igvn.replace_node(_fallthroughcatchproj, alloc->in(TypeFunc::Control));
 937   }
 938   if (_memproj_fallthrough != NULL) {
 939     _igvn.replace_node(_memproj_fallthrough, alloc->in(TypeFunc::Memory));
 940   }
 941   if (_memproj_catchall != NULL) {
 942     _igvn.replace_node(_memproj_catchall, C->top());
 943   }
 944   if (_ioproj_fallthrough != NULL) {
 945     _igvn.replace_node(_ioproj_fallthrough, alloc->in(TypeFunc::I_O));
 946   }
 947   if (_ioproj_catchall != NULL) {
 948     _igvn.replace_node(_ioproj_catchall, C->top());
 949   }
 950   if (_catchallcatchproj != NULL) {
 951     _igvn.replace_node(_catchallcatchproj, C->top());
 952   }
 953 }
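
Every mutation inside the loops above may delete one or more of the def's out-edges, so the reverse iterators rewind by the observed difference: k -= (oc2 - use->outcnt()) and j -= (oc1 - res->outcnt()). A deletion-tolerant analogue in plain C++ (a std::vector stands in for the DU edge list; none of this is HotSpot's DUIterator machinery):

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> uses = {10, 20, 30, 40};  // stand-in for a def's uses
      for (size_t j = uses.size(); j > 0; ) {
        size_t oc1 = uses.size();        // out-count before the mutation
        uses.pop_back();                 // processing removes this use...
        if (!uses.empty() && uses.back() == 30) {
          uses.pop_back();               // ...and may kill others with it
        }
        j -= (oc1 - uses.size());        // rewind by the number deleted
      }
      std::printf("remaining uses: %zu\n", uses.size());
      return 0;
    }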
 954 
 955 bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
 956 
 957   if (!EliminateAllocations || !alloc->_is_scalar_replaceable) {
 958     return false;
 959   }
 960 
 961   extract_call_projections(alloc);
 962 
 963   GrowableArray <SafePointNode *> safepoints;
 964   if (!can_eliminate_allocation(alloc, safepoints)) {
 965     return false;
 966   }
 967 
 968   if (!scalar_replacement(alloc, safepoints)) {
 969     return false;
 970   }
 971 
 972   CompileLog* log = C->log();
 973   if (log != NULL) {
 974     Node* klass = alloc->in(AllocateNode::KlassNode);
 975     const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
 976     log->head("eliminate_allocation type='%d'",
 977               log->identify(tklass->klass()));
 978     JVMState* p = alloc->jvms();
 979     while (p != NULL) {
 980       log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
 981       p = p->caller();
 982     }
 983     log->tail("eliminate_allocation");
 984   }
 985 
 986   process_users_of_allocation(alloc);
 987 
 988 #ifndef PRODUCT
 989   if (PrintEliminateAllocations) {
 990     if (alloc->is_AllocateArray())
 991       tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
 992     else
 993       tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
 994   }
 995 #endif
 996 
 997   return true;
 998 }
 999 
1000 
1001 //---------------------------set_eden_pointers-------------------------
1002 void PhaseMacroExpand::set_eden_pointers(Node* &eden_top_adr, Node* &eden_end_adr) {
1003   if (UseTLAB) {                // Private allocation: load from TLS
1004     Node* thread = transform_later(new (C) ThreadLocalNode());
1005     int tlab_top_offset = in_bytes(JavaThread::tlab_top_offset());
1006     int tlab_end_offset = in_bytes(JavaThread::tlab_end_offset());
1007     eden_top_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_top_offset);
1008     eden_end_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_end_offset);
1009   } else {                      // Shared allocation: load from globals
1010     CollectedHeap* ch = Universe::heap();
1011     address top_adr = (address)ch->top_addr();
1012     address end_adr = (address)ch->end_addr();
1013     eden_top_adr = makecon(TypeRawPtr::make(top_adr));
1014     eden_end_adr = basic_plus_adr(eden_top_adr, end_adr - top_adr);
1015   }
1016 }
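
In the shared-allocation branch above, only the eden top address becomes a constant; the end address is derived from it by a byte delta through basic_plus_adr, so both pointers hang off a single base. A trivial standalone illustration of that pointer arithmetic (a local array stands in for the heap):

    #include <cstddef>
    #include <cstdio>

    int main() {
      char heap[64];
      char* top_adr = heap;         // stand-in for CollectedHeap::top_addr()
      char* end_adr = heap + 64;    // stand-in for CollectedHeap::end_addr()
      std::ptrdiff_t delta = end_adr - top_adr;
      char* eden_end_adr = top_adr + delta;   // basic_plus_adr analogue
      std::printf("end = top + %td bytes\n", delta);
      return eden_end_adr == end_adr ? 0 : 1;
    }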
1017 
1018 
1019 Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) {


2367       if (n->is_AbstractLock()) {
2368         success = eliminate_locking_node(n->as_AbstractLock());
2369       }
2370       assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
2371       progress = progress || success;
2372     }
2373   }
2374   // Next, attempt to eliminate allocations
2375   progress = true;
2376   while (progress) {
2377     progress = false;
2378     for (int i = C->macro_count(); i > 0; i--) {
2379       Node * n = C->macro_node(i-1);
2380       bool success = false;
2381       debug_only(int old_macro_count = C->macro_count(););
2382       switch (n->class_id()) {
2383       case Node::Class_Allocate:
2384       case Node::Class_AllocateArray:
2385         success = eliminate_allocate_node(n->as_Allocate());
2386         break;
2387       case Node::Class_Lock:
2388       case Node::Class_Unlock:
2389         assert(!n->as_AbstractLock()->is_eliminated(), "sanity");
2390         break;
2391       default:
2392         assert(n->Opcode() == Op_LoopLimit ||
2393                n->Opcode() == Op_Opaque1   ||
2394                n->Opcode() == Op_Opaque2, "unknown node type in macro list");
2395       }
2396       assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
2397       progress = progress || success;
2398     }
2399   }
2400 }
2401 
2402 //------------------------------expand_macro_nodes----------------------
2403 //  Returns true if a failure occurred.
2404 bool PhaseMacroExpand::expand_macro_nodes() {
2405   // Last attempt to eliminate macro nodes.
2406   eliminate_macro_nodes();
2407 
2408   // Make sure expansion will not cause node limit to be exceeded.
2409   // Worst case is a macro node gets expanded into about 50 nodes.
2410   // Allow 50% more for optimization.
2411   if (C->check_node_count(C->macro_count() * 75, "out of nodes before macro expansion" ) )
2412     return true;
2413 
2414   // Eliminate Opaque and LoopLimit nodes. Do it after all loop optimizations.
2415   bool progress = true;
2416   while (progress) {
2417     progress = false;
2418     for (int i = C->macro_count(); i > 0; i--) {
2419       Node * n = C->macro_node(i-1);
2420       bool success = false;
2421       debug_only(int old_macro_count = C->macro_count(););
2422       if (n->Opcode() == Op_LoopLimit) {
2423         // Remove it from macro list and put on IGVN worklist to optimize.
2424         C->remove_macro_node(n);
2425         _igvn._worklist.push(n);
2426         success = true;
2427       } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
2428         _igvn.replace_node(n, n->in(1));
2429         success = true;
2430       }
2431       assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
2432       progress = progress || success;
2433     }
2434   }
2435 
2436   // expand "macro" nodes
2437   // nodes are removed from the macro list as they are processed
2438   while (C->macro_count() > 0) {
2439     int macro_count = C->macro_count();
2440     Node * n = C->macro_node(macro_count-1);
2441     assert(n->is_macro(), "only macro nodes expected here");
2442     if (_igvn.type(n) == Type::TOP || n->in(0)->is_top() ) {
2443       // node is unreachable, so don't try to expand it
2444       C->remove_macro_node(n);
2445       continue;
2446     }

--- new version ---

 649           if (use->Opcode() == Op_Return) {
 650             NOT_PRODUCT(fail_eliminate = "Object is return value";)
 651           } else {
 652             NOT_PRODUCT(fail_eliminate = "Object is referenced by node";)
 653           }
 654           DEBUG_ONLY(disq_node = use;)
 655         }
 656         can_eliminate = false;
 657       }
 658     }
 659   }
 660 
 661 #ifndef PRODUCT
 662   if (PrintEliminateAllocations) {
 663     if (can_eliminate) {
 664       tty->print("Scalar ");
 665       if (res == NULL)
 666         alloc->dump();
 667       else
 668         res->dump();
 669     } else if (alloc->_is_scalar_replaceable) {
 670       tty->print("NotScalar (%s)", fail_eliminate);
 671       if (res == NULL)
 672         alloc->dump();
 673       else
 674         res->dump();
 675 #ifdef ASSERT
 676       if (disq_node != NULL) {
 677           tty->print("  >>>> ");
 678           disq_node->dump();
 679       }
 680 #endif /*ASSERT*/
 681     }
 682   }
 683 #endif
 684   return can_eliminate;
 685 }
 686 
 687 // Do scalar replacement.
 688 bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) {
 689   GrowableArray <SafePointNode *> safepoints_done;


 828 #endif
 829         return false;
 830       }
 831       if (UseCompressedOops && field_type->isa_narrowoop()) {
 832         // Enable "DecodeN(EncodeP(Allocate)) --> Allocate" transformation
 833         // to be able to scalar replace the allocation.
 834         if (field_val->is_EncodeP()) {
 835           field_val = field_val->in(1);
 836         } else {
 837           field_val = transform_later(new (C) DecodeNNode(field_val, field_val->bottom_type()->make_ptr()));
 838         }
 839       }
 840       sfpt->add_req(field_val);
 841     }
 842     JVMState *jvms = sfpt->jvms();
 843     jvms->set_endoff(sfpt->req());
 844     // Now make a pass over the debug information replacing any references
 845     // to the allocated object with "sobj"
 846     int start = jvms->debug_start();
 847     int end   = jvms->debug_end();
 848     sfpt->replace_edges_in_range(res, sobj, start, end);
 849     safepoints_done.append_if_missing(sfpt); // keep it for rollback
 850   }
 851   return true;
 852 }
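
The single call above (new line 848) replaces the open-coded loop of the old version (old lines 848-852). Assuming the helper merely packages that loop, its effect can be sketched as follows (stand-in Node type; this is not the actual SafePointNode source):

    #include <vector>

    // Minimal stand-in for a node's input-edge array.
    struct Node {
      std::vector<Node*> req;
      Node* in(int i) const         { return req[i]; }
      void  set_req(int i, Node* n) { req[i] = n; }
    };

    // Assumed shape of the helper: same effect as the old loop.
    void replace_edges_in_range(Node* sfpt, Node* old_in, Node* new_in,
                                int start, int end) {
      for (int i = start; i < end; i++) {
        if (sfpt->in(i) == old_in) {
          sfpt->set_req(i, new_in);   // swap the debug-info reference to sobj
        }
      }
    }

    int main() {
      Node res, sobj, sfpt;
      sfpt.req = {&res, &res, nullptr};
      replace_edges_in_range(&sfpt, &res, &sobj, 0, 3);
      return (sfpt.in(0) == &sobj && sfpt.in(1) == &sobj) ? 0 : 1;
    }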
 853 
 854 // Process users of eliminated allocation.
 855 void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
 856   Node* res = alloc->result_cast();
 857   if (res != NULL) {
 858     for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) {
 859       Node *use = res->last_out(j);
 860       uint oc1 = res->outcnt();
 861 
 862       if (use->is_AddP()) {
 863         for (DUIterator_Last kmin, k = use->last_outs(kmin); k >= kmin; ) {
 864           Node *n = use->last_out(k);
 865           uint oc2 = use->outcnt();
 866           if (n->is_Store()) {
 867 #ifdef ASSERT
 868             // Verify that there are no dependent MemBarVolatile nodes;
 869             // they should be removed during IGVN, see MemBarNode::Ideal().
 870             for (DUIterator_Fast pmax, p = n->fast_outs(pmax);
 871                                        p < pmax; p++) {
 872               Node* mb = n->fast_out(p);
 873               assert(mb->is_Initialize() || !mb->is_MemBar() ||
 874                      mb->req() <= MemBarNode::Precedent ||
 875                      mb->in(MemBarNode::Precedent) != n,


 878 #endif
 879             _igvn.replace_node(n, n->in(MemNode::Memory));
 880           } else {
 881             eliminate_card_mark(n);
 882           }
 883           k -= (oc2 - use->outcnt());
 884         }
 885       } else {
 886         eliminate_card_mark(use);
 887       }
 888       j -= (oc1 - res->outcnt());
 889     }
 890     assert(res->outcnt() == 0, "all uses of allocated objects must be deleted");
 891     _igvn.remove_dead_node(res);
 892   }
 893 
 894   //
 895   // Process other users of allocation's projections
 896   //
 897   if (_resproj != NULL && _resproj->outcnt() != 0) {
 898     // First disconnect stores captured by Initialize node.
 899     // If Initialize node is eliminated first in the following code,
 900     // it will kill such stores and DUIterator_Last will assert.
 901     for (DUIterator_Fast jmax, j = _resproj->fast_outs(jmax);  j < jmax; j++) {
 902       Node *use = _resproj->fast_out(j);
 903       if (use->is_AddP()) {
 904         // raw memory addresses used only by the initialization
 905         _igvn.replace_node(use, C->top());
 906         --j; --jmax;
 907       }
 908     }
 909     for (DUIterator_Last jmin, j = _resproj->last_outs(jmin); j >= jmin; ) {
 910       Node *use = _resproj->last_out(j);
 911       uint oc1 = _resproj->outcnt();
 912       if (use->is_Initialize()) {
 913         // Eliminate Initialize node.
 914         InitializeNode *init = use->as_Initialize();
 915         assert(init->outcnt() <= 2, "only a control and memory projection expected");
 916         Node *ctrl_proj = init->proj_out(TypeFunc::Control);
 917         if (ctrl_proj != NULL) {
 918           assert(init->in(TypeFunc::Control) == _fallthroughcatchproj, "allocation control projection");
 919           _igvn.replace_node(ctrl_proj, _fallthroughcatchproj);
 920         }
 921         Node *mem_proj = init->proj_out(TypeFunc::Memory);
 922         if (mem_proj != NULL) {
 923           Node *mem = init->in(TypeFunc::Memory);
 924 #ifdef ASSERT
 925           if (mem->is_MergeMem()) {
 926             assert(mem->in(TypeFunc::Memory) == _memproj_fallthrough, "allocation memory projection");
 927           } else {
 928             assert(mem == _memproj_fallthrough, "allocation memory projection");
 929           }
 930 #endif
 931           _igvn.replace_node(mem_proj, mem);
 932         }
 933       } else {
 934         assert(false, "only Initialize or AddP expected");
 935       }
 936       j -= (oc1 - _resproj->outcnt());
 937     }
 938   }
 939   if (_fallthroughcatchproj != NULL) {
 940     _igvn.replace_node(_fallthroughcatchproj, alloc->in(TypeFunc::Control));
 941   }
 942   if (_memproj_fallthrough != NULL) {
 943     _igvn.replace_node(_memproj_fallthrough, alloc->in(TypeFunc::Memory));
 944   }
 945   if (_memproj_catchall != NULL) {
 946     _igvn.replace_node(_memproj_catchall, C->top());
 947   }
 948   if (_ioproj_fallthrough != NULL) {
 949     _igvn.replace_node(_ioproj_fallthrough, alloc->in(TypeFunc::I_O));
 950   }
 951   if (_ioproj_catchall != NULL) {
 952     _igvn.replace_node(_ioproj_catchall, C->top());
 953   }
 954   if (_catchallcatchproj != NULL) {
 955     _igvn.replace_node(_catchallcatchproj, C->top());
 956   }
 957 }
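
The new first pass (lines 898-908) is the substantive fix in this hunk: AddP uses are detached with a forward iterator before the reverse walk, because eliminating the Initialize node can kill captured stores out from under DUIterator_Last and trip its assert. A simplified standalone analogue of the ordering (strings stand in for node kinds):

    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> outs = {"Initialize", "AddP", "AddP"};
      // Pass 1 (forward): detach the AddP uses, compensating index and
      // bound just like the --j; --jmax above.
      for (int j = 0, jmax = (int)outs.size(); j < jmax; j++) {
        if (outs[j] == "AddP") {
          outs.erase(outs.begin() + j);
          --j; --jmax;
        }
      }
      // Pass 2 (reverse): only the Initialize use remains, so nothing it
      // kills can invalidate an entry the iterator has yet to visit.
      for (int j = (int)outs.size(); j > 0; ) {
        int oc1 = (int)outs.size();
        outs.pop_back();             // eliminate the use
        j -= (oc1 - (int)outs.size());
      }
      return outs.empty() ? 0 : 1;
    }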
 958 
 959 bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
 960   if (!EliminateAllocations || !alloc->_is_non_escaping) {
 961     return false;
 962   }
 963   Node* klass = alloc->in(AllocateNode::KlassNode);
 964   const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
 965   Node* res = alloc->result_cast();
 966   // Eliminate boxing allocations which are not used
 967   // regardless of their scalar-replaceable status.
 968   bool boxing_alloc = C->eliminate_boxing() &&
 969                       tklass->klass()->is_instance_klass()  &&
 970                       tklass->klass()->as_instance_klass()->is_box_klass();
 971   if (!alloc->_is_scalar_replaceable && (!boxing_alloc || (res != NULL))) {
 972     return false;
 973   }
 974 
 975   extract_call_projections(alloc);
 976 
 977   GrowableArray <SafePointNode *> safepoints;
 978   if (!can_eliminate_allocation(alloc, safepoints)) {
 979     return false;
 980   }
 981 
 982   if (!alloc->_is_scalar_replaceable) {
 983     assert(res == NULL, "sanity");
 984     // We can only eliminate allocation if all debug info references
 985     // are already replaced with SafePointScalarObject because
 986     // we can't search for a field's value without an instance_id.
 987     if (safepoints.length() > 0) {
 988       return false;
 989     }
 990   }
 991 
 992   if (!scalar_replacement(alloc, safepoints)) {
 993     return false;
 994   }
 995 
 996   CompileLog* log = C->log();
 997   if (log != NULL) {
 998     log->head("eliminate_allocation type='%d'",
 999               log->identify(tklass->klass()));
1000     JVMState* p = alloc->jvms();
1001     while (p != NULL) {
1002       log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
1003       p = p->caller();
1004     }
1005     log->tail("eliminate_allocation");
1006   }
1007 
1008   process_users_of_allocation(alloc);
1009 
1010 #ifndef PRODUCT
1011   if (PrintEliminateAllocations) {
1012     if (alloc->is_AllocateArray())
1013       tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
1014     else
1015       tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
1016   }
1017 #endif
1018 
1019   return true;
1020 }
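
The early gates above (lines 960-973) reduce to two boolean tests: elimination must be enabled for a non-escaping allocation, and the allocation must either be scalar replaceable or be a boxing allocation whose result is unused. Restated as a standalone predicate (illustrative names; the later safepoints.length() check is omitted):

    #include <cstdio>

    // Parameter names are illustrative, mirroring the fields read above.
    static bool may_eliminate(bool eliminate_allocations, bool is_non_escaping,
                              bool is_scalar_replaceable, bool boxing_alloc,
                              bool result_used) {
      if (!eliminate_allocations || !is_non_escaping) return false;
      if (!is_scalar_replaceable && (!boxing_alloc || result_used)) return false;
      return true;
    }

    int main() {
      // An unused, non-escaping box passes even without scalar replacement.
      std::printf("%d\n", may_eliminate(true, true, false, true, false)); // 1
      // The same box with a live result does not.
      std::printf("%d\n", may_eliminate(true, true, false, true, true));  // 0
      return 0;
    }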
1021 
1022 bool PhaseMacroExpand::eliminate_boxing_node(CallStaticJavaNode *boxing) {
1023   // EA should remove all uses of non-escaping boxing node.
1024   if (!C->eliminate_boxing() || boxing->proj_out(TypeFunc::Parms) != NULL) {
1025     return false;
1026   }
1027 
1028   extract_call_projections(boxing);
1029 
1030   const TypeTuple* r = boxing->tf()->range();
1031   assert(r->cnt() > TypeFunc::Parms, "sanity");
1032   const TypeInstPtr* t = r->field_at(TypeFunc::Parms)->isa_instptr();
1033   assert(t != NULL, "sanity");
1034 
1035   CompileLog* log = C->log();
1036   if (log != NULL) {
1037     log->head("eliminate_boxing type='%d'",
1038               log->identify(t->klass()));
1039     JVMState* p = boxing->jvms();
1040     while (p != NULL) {
1041       log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
1042       p = p->caller();
1043     }
1044     log->tail("eliminate_boxing");
1045   }
1046 
1047   process_users_of_allocation(boxing);
1048 
1049 #ifndef PRODUCT
1050   if (PrintEliminateAllocations) {
1051     tty->print("++++ Eliminated: %d ", boxing->_idx);
1052     boxing->method()->print_short_name(tty);
1053     tty->cr();
1054   }
1055 #endif
1056 
1057   return true;
1058 }
1059 
1060 //---------------------------set_eden_pointers-------------------------
1061 void PhaseMacroExpand::set_eden_pointers(Node* &eden_top_adr, Node* &eden_end_adr) {
1062   if (UseTLAB) {                // Private allocation: load from TLS
1063     Node* thread = transform_later(new (C) ThreadLocalNode());
1064     int tlab_top_offset = in_bytes(JavaThread::tlab_top_offset());
1065     int tlab_end_offset = in_bytes(JavaThread::tlab_end_offset());
1066     eden_top_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_top_offset);
1067     eden_end_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_end_offset);
1068   } else {                      // Shared allocation: load from globals
1069     CollectedHeap* ch = Universe::heap();
1070     address top_adr = (address)ch->top_addr();
1071     address end_adr = (address)ch->end_addr();
1072     eden_top_adr = makecon(TypeRawPtr::make(top_adr));
1073     eden_end_adr = basic_plus_adr(eden_top_adr, end_adr - top_adr);
1074   }
1075 }
1076 
1077 
1078 Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) {


2426       if (n->is_AbstractLock()) {
2427         success = eliminate_locking_node(n->as_AbstractLock());
2428       }
2429       assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
2430       progress = progress || success;
2431     }
2432   }
2433   // Next, attempt to eliminate allocations
2434   progress = true;
2435   while (progress) {
2436     progress = false;
2437     for (int i = C->macro_count(); i > 0; i--) {
2438       Node * n = C->macro_node(i-1);
2439       bool success = false;
2440       debug_only(int old_macro_count = C->macro_count(););
2441       switch (n->class_id()) {
2442       case Node::Class_Allocate:
2443       case Node::Class_AllocateArray:
2444         success = eliminate_allocate_node(n->as_Allocate());
2445         break;
2446       case Node::Class_CallStaticJava:
2447         success = eliminate_boxing_node(n->as_CallStaticJava());
2448         break;
2449       case Node::Class_Lock:
2450       case Node::Class_Unlock:
2451         assert(!n->as_AbstractLock()->is_eliminated(), "sanity");
2452         break;
2453       default:
2454         assert(n->Opcode() == Op_LoopLimit ||
2455                n->Opcode() == Op_Opaque1   ||
2456                n->Opcode() == Op_Opaque2, "unknown node type in macro list");
2457       }
2458       assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
2459       progress = progress || success;
2460     }
2461   }
2462 }
2463 
2464 //------------------------------expand_macro_nodes----------------------
2465 //  Returns true if a failure occurred.
2466 bool PhaseMacroExpand::expand_macro_nodes() {
2467   // Last attempt to eliminate macro nodes.
2468   eliminate_macro_nodes();
2469 
2470   // Make sure expansion will not cause node limit to be exceeded.
2471   // Worst case is a macro node gets expanded into about 50 nodes.
2472   // Allow 50% more for optimization.
2473   if (C->check_node_count(C->macro_count() * 75, "out of nodes before macro expansion" ) )
2474     return true;
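
The multiplier above is just the comment's two constants combined: 50 worst-case nodes per macro node plus 50% headroom. As a compile-time restatement:

    // 50 worst-case nodes per macro node, plus 50% headroom, gives 75.
    static_assert(50 + 50 / 2 == 75, "node budget per macro node");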
2475 
2476   // Eliminate Opaque and LoopLimit nodes. Do it after all loop optimizations.
2477   bool progress = true;
2478   while (progress) {
2479     progress = false;
2480     for (int i = C->macro_count(); i > 0; i--) {
2481       Node * n = C->macro_node(i-1);
2482       bool success = false;
2483       debug_only(int old_macro_count = C->macro_count(););
2484       if (n->Opcode() == Op_LoopLimit) {
2485         // Remove it from macro list and put on IGVN worklist to optimize.
2486         C->remove_macro_node(n);
2487         _igvn._worklist.push(n);
2488         success = true;
2489       } else if (n->Opcode() == Op_CallStaticJava) {
2490         // Remove it from macro list and put on IGVN worklist to optimize.
2491         C->remove_macro_node(n);
2492         _igvn._worklist.push(n);
2493         success = true;
2494       } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
2495         _igvn.replace_node(n, n->in(1));
2496         success = true;
2497       }
2498       assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
2499       progress = progress || success;
2500     }
2501   }
2502 
2503   // expand "macro" nodes
2504   // nodes are removed from the macro list as they are processed
2505   while (C->macro_count() > 0) {
2506     int macro_count = C->macro_count();
2507     Node * n = C->macro_node(macro_count-1);
2508     assert(n->is_macro(), "only macro nodes expected here");
2509     if (_igvn.type(n) == Type::TOP || n->in(0)->is_top() ) {
2510       // node is unreachable, so don't try to expand it
2511       C->remove_macro_node(n);
2512       continue;
2513     }