src/hotspot/share/opto/compile.cpp

rev 52443 : 8213489: GC/C2 abstraction for Compile::final_graph_reshaping()
rev 52444 : [mq]: JDK-8213489-1.patch
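
Summary of the change: the GC-specific cases are lifted out of Compile::final_graph_reshaping_impl(). Each node is first offered to the GC barrier set via BarrierSetC2::final_graph_reshaping(); only when the barrier set does not handle the node does the generic switch, now factored out into Compile::final_graph_reshaping_main_switch(), run. A minimal sketch of the hook's shape, inferred from the call site visible in the new version below (the default implementation shown here is an assumption, not part of this hunk):

    // Sketch only: signature inferred from the call
    //   BarrierSet::barrier_set()->barrier_set_c2()->final_graph_reshaping(this, n, nop)
    // in the new version below. Returning false means "not handled", so the
    // generic main switch still runs for this node.
    class BarrierSetC2 {
    public:
      virtual bool final_graph_reshaping(Compile* compile, Node* n, uint opcode) const {
        return false; // default: no GC-specific reshaping
      }
    };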

Old version (before the change):

2775     int alias_idx = get_alias_index(n->as_Mem()->adr_type());
2776     assert( n->in(0) != NULL || alias_idx != Compile::AliasIdxRaw ||
2777             // oop will be recorded in oop map if load crosses safepoint
2778             n->is_Load() && (n->as_Load()->bottom_type()->isa_oopptr() ||
2779                              LoadNode::is_immutable_value(n->in(MemNode::Address))),
2780             "raw memory operations should have control edge");
2781   }
2782   if (n->is_MemBar()) {
2783     MemBarNode* mb = n->as_MemBar();
2784     if (mb->trailing_store() || mb->trailing_load_store()) {
2785       assert(mb->leading_membar()->trailing_membar() == mb, "bad membar pair");
2786       Node* mem = mb->in(MemBarNode::Precedent);
2787       assert((mb->trailing_store() && mem->is_Store() && mem->as_Store()->is_release()) ||
2788              (mb->trailing_load_store() && mem->is_LoadStore()), "missing mem op");
2789     } else if (mb->leading()) {
2790       assert(mb->trailing_membar()->leading_membar() == mb, "bad membar pair");
2791     }
2792   }
2793 #endif
2794   // Count FPU ops and common calls, implements item (3)
2795   switch( nop ) {
2796   // Count all float operations that may use FPU
2797   case Op_AddF:
2798   case Op_SubF:
2799   case Op_MulF:
2800   case Op_DivF:
2801   case Op_NegF:
2802   case Op_ModF:
2803   case Op_ConvI2F:
2804   case Op_ConF:
2805   case Op_CmpF:
2806   case Op_CmpF3:
2807   // case Op_ConvL2F: // longs are split into 32-bit halves
2808     frc.inc_float_count();
2809     break;
2810 
2811   case Op_ConvF2D:
2812   case Op_ConvD2F:
2813     frc.inc_float_count();
2814     frc.inc_double_count();


2921   case Op_GetAndAddL:
2922   case Op_GetAndSetS:
2923   case Op_GetAndSetB:
2924   case Op_GetAndSetI:
2925   case Op_GetAndSetL:
2926   case Op_GetAndSetP:
2927   case Op_GetAndSetN:
2928   case Op_StoreP:
2929   case Op_StoreN:
2930   case Op_StoreNKlass:
2931   case Op_LoadB:
2932   case Op_LoadUB:
2933   case Op_LoadUS:
2934   case Op_LoadI:
2935   case Op_LoadKlass:
2936   case Op_LoadNKlass:
2937   case Op_LoadL:
2938   case Op_LoadL_unaligned:
2939   case Op_LoadPLocked:
2940   case Op_LoadP:
2941 #if INCLUDE_ZGC
2942   case Op_LoadBarrierSlowReg:
2943   case Op_LoadBarrierWeakSlowReg:
2944 #endif
2945   case Op_LoadN:
2946   case Op_LoadRange:
2947   case Op_LoadS: {
2948   handle_mem:
2949 #ifdef ASSERT
2950     if( VerifyOptoOopOffsets ) {
2951       assert( n->is_Mem(), "" );
2952       MemNode *mem  = (MemNode*)n;
2953       // Check to see if address types have grounded out somehow.
2954       const TypeInstPtr *tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
2955       assert( !tp || oop_offset_is_sane(tp), "" );
2956     }
2957 #endif
2958     break;
2959   }
2960 
2961   case Op_AddP: {               // Assert sane base pointers
2962     Node *addp = n->in(AddPNode::Address);
2963     assert( !addp->is_AddP() ||
2964             addp->in(AddPNode::Base)->is_top() || // Top OK for allocation
2965             addp->in(AddPNode::Base) == n->in(AddPNode::Base),
2966             "Base pointers must match (addp %u)", addp->_idx );
2967 #ifdef _LP64
2968     if ((UseCompressedOops || UseCompressedClassPointers) &&
2969         addp->Opcode() == Op_ConP &&
2970         addp == n->in(AddPNode::Base) &&
2971         n->in(AddPNode::Offset)->is_Con()) {
2972       // If the transformation of ConP to ConN+DecodeN is beneficial depends


3450         }
3451       }
3452     }
3453     break;
3454   }
3455   case Op_CmpUL: {
3456     if (!Matcher::has_match_rule(Op_CmpUL)) {
3457       // We don't support unsigned long comparisons. Set 'max_idx_expr'
3458       // to max_julong if < 0 to make the signed comparison fail.
3459       ConINode* sign_pos = new ConINode(TypeInt::make(BitsPerLong - 1));
3460       Node* sign_bit_mask = new RShiftLNode(n->in(1), sign_pos);
3461       Node* orl = new OrLNode(n->in(1), sign_bit_mask);
3462       ConLNode* remove_sign_mask = new ConLNode(TypeLong::make(max_jlong));
3463       Node* andl = new AndLNode(orl, remove_sign_mask);
3464       Node* cmp = new CmpLNode(andl, n->in(2));
3465       n->subsume_by(cmp, this);
3466     }
3467     break;
3468   }
3469   default:
3470     assert( !n->is_Call(), "" );
3471     assert( !n->is_Mem(), "" );
3472     assert( nop != Op_ProfileBoolean, "should be eliminated during IGVN");
3473     break;
3474   }
3475 
3476   // Collect CFG split points
3477   if (n->is_MultiBranch() && !n->is_RangeCheck()) {
3478     frc._tests.push(n);
3479   }
3480 }
3481 
3482 //------------------------------final_graph_reshaping_walk---------------------
3483 // Replacing Opaque nodes with their input in final_graph_reshaping_impl(),
3484 // requires that the walk visits a node's inputs before visiting the node.
3485 void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
3486   ResourceArea *area = Thread::current()->resource_area();
3487   Unique_Node_List sfpt(area);
3488 
3489   frc._visited.set(root->_idx); // first, mark node as visited
3490   uint cnt = root->req();
3491   Node *n = root;
3492   uint  i = 0;
3493   while (true) {
3494     if (i < cnt) {
3495       // Place all non-visited non-null inputs onto stack
3496       Node* m = n->in(i);
3497       ++i;
3498       if (m != NULL && !frc._visited.test_set(m->_idx)) {

New version (after the change):

2775     int alias_idx = get_alias_index(n->as_Mem()->adr_type());
2776     assert( n->in(0) != NULL || alias_idx != Compile::AliasIdxRaw ||
2777             // oop will be recorded in oop map if load crosses safepoint
2778             n->is_Load() && (n->as_Load()->bottom_type()->isa_oopptr() ||
2779                              LoadNode::is_immutable_value(n->in(MemNode::Address))),
2780             "raw memory operations should have control edge");
2781   }
2782   if (n->is_MemBar()) {
2783     MemBarNode* mb = n->as_MemBar();
2784     if (mb->trailing_store() || mb->trailing_load_store()) {
2785       assert(mb->leading_membar()->trailing_membar() == mb, "bad membar pair");
2786       Node* mem = mb->in(MemBarNode::Precedent);
2787       assert((mb->trailing_store() && mem->is_Store() && mem->as_Store()->is_release()) ||
2788              (mb->trailing_load_store() && mem->is_LoadStore()), "missing mem op");
2789     } else if (mb->leading()) {
2790       assert(mb->trailing_membar()->leading_membar() == mb, "bad membar pair");
2791     }
2792   }
2793 #endif
2794   // Count FPU ops and common calls, implements item (3)
2795   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->final_graph_reshaping(this, n, nop);
2796   if (!gc_handled) {
2797     final_graph_reshaping_main_switch(n, frc, nop);
2798   }
2799 
2800   // Collect CFG split points
2801   if (n->is_MultiBranch() && !n->is_RangeCheck()) {
2802     frc._tests.push(n);
2803   }
2804 }
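
With the hook in place, each GC backend can claim its own nodes before the generic switch runs. This is why the two ZGC opcodes (Op_LoadBarrierSlowReg, Op_LoadBarrierWeakSlowReg), guarded by #if INCLUDE_ZGC in the old version's Load case list, are gone from the new version. A hypothetical sketch of such an override (the real ZBarrierSetC2 code is not part of this hunk):

    // Hypothetical sketch; the actual ZGC implementation lives outside this
    // webrev. Returning true tells final_graph_reshaping_impl() that the node
    // was handled and the main switch should be skipped.
    bool ZBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, uint opcode) const {
      switch (opcode) {
      case Op_LoadBarrierSlowReg:
      case Op_LoadBarrierWeakSlowReg:
        // ZGC's load-barrier slow-path nodes are memory ops; treat them like
        // the Load cases in the main switch.
        return true;
      default:
        return false; // let Compile::final_graph_reshaping_main_switch() run
      }
    }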
2805 
2806 void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& frc, uint nop) {
2807   switch( nop ) {
2808   // Count all float operations that may use FPU
2809   case Op_AddF:
2810   case Op_SubF:
2811   case Op_MulF:
2812   case Op_DivF:
2813   case Op_NegF:
2814   case Op_ModF:
2815   case Op_ConvI2F:
2816   case Op_ConF:
2817   case Op_CmpF:
2818   case Op_CmpF3:
2819   // case Op_ConvL2F: // longs are split into 32-bit halves
2820     frc.inc_float_count();
2821     break;
2822 
2823   case Op_ConvF2D:
2824   case Op_ConvD2F:
2825     frc.inc_float_count();
2826     frc.inc_double_count();


2933   case Op_GetAndAddL:
2934   case Op_GetAndSetS:
2935   case Op_GetAndSetB:
2936   case Op_GetAndSetI:
2937   case Op_GetAndSetL:
2938   case Op_GetAndSetP:
2939   case Op_GetAndSetN:
2940   case Op_StoreP:
2941   case Op_StoreN:
2942   case Op_StoreNKlass:
2943   case Op_LoadB:
2944   case Op_LoadUB:
2945   case Op_LoadUS:
2946   case Op_LoadI:
2947   case Op_LoadKlass:
2948   case Op_LoadNKlass:
2949   case Op_LoadL:
2950   case Op_LoadL_unaligned:
2951   case Op_LoadPLocked:
2952   case Op_LoadP:
2953   case Op_LoadN:
2954   case Op_LoadRange:
2955   case Op_LoadS: {
2956   handle_mem:
2957 #ifdef ASSERT
2958     if( VerifyOptoOopOffsets ) {
2959       MemNode* mem  = n->as_Mem();
2960       // Check to see if address types have grounded out somehow.
2961       const TypeInstPtr *tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
2962       assert( !tp || oop_offset_is_sane(tp), "" );
2963     }
2964 #endif
2965     break;
2966   }
2967 
2968   case Op_AddP: {               // Assert sane base pointers
2969     Node *addp = n->in(AddPNode::Address);
2970     assert( !addp->is_AddP() ||
2971             addp->in(AddPNode::Base)->is_top() || // Top OK for allocation
2972             addp->in(AddPNode::Base) == n->in(AddPNode::Base),
2973             "Base pointers must match (addp %u)", addp->_idx );
2974 #ifdef _LP64
2975     if ((UseCompressedOops || UseCompressedClassPointers) &&
2976         addp->Opcode() == Op_ConP &&
2977         addp == n->in(AddPNode::Base) &&
2978         n->in(AddPNode::Offset)->is_Con()) {
2979       // If the transformation of ConP to ConN+DecodeN is beneficial depends


3457         }
3458       }
3459     }
3460     break;
3461   }
3462   case Op_CmpUL: {
3463     if (!Matcher::has_match_rule(Op_CmpUL)) {
3464       // We don't support unsigned long comparisons. Set 'max_idx_expr'
3465       // to max_julong if < 0 to make the signed comparison fail.
3466       ConINode* sign_pos = new ConINode(TypeInt::make(BitsPerLong - 1));
3467       Node* sign_bit_mask = new RShiftLNode(n->in(1), sign_pos);
3468       Node* orl = new OrLNode(n->in(1), sign_bit_mask);
3469       ConLNode* remove_sign_mask = new ConLNode(TypeLong::make(max_jlong));
3470       Node* andl = new AndLNode(orl, remove_sign_mask);
3471       Node* cmp = new CmpLNode(andl, n->in(2));
3472       n->subsume_by(cmp, this);
3473     }
3474     break;
3475   }
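
Why the CmpUL rewrite above is sound (a scalar restatement; the helper name below is illustrative, not from the patch): x >> 63 (arithmetic shift) is 0 when x >= 0 and all ones when x < 0, so (x | (x >> 63)) & max_jlong leaves a non-negative x unchanged and clamps a negative x, i.e. a huge unsigned value, to max_jlong. For a second operand known to lie in [0, max_jlong], as in the range-check expressions this code targets, the signed CmpL on the clamped value then orders the same way the unsigned CmpUL would:

    #include <cstdint>

    // Scalar equivalent of the node graph built above (illustrative).
    static int64_t clamp_for_signed_cmp(int64_t x) {
      int64_t sign_mask = x >> 63;          // 0 if x >= 0, else all ones
      return (x | sign_mask) & INT64_MAX;   // x if x >= 0, else max_jlong
    }
    // For 0 <= y <= INT64_MAX:
    //   (uint64_t)x < (uint64_t)y  <==>  clamp_for_signed_cmp(x) < y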
3476   default:
3477     assert(!n->is_Call(), "");
3478     assert(!n->is_Mem(), "");
3479     assert(nop != Op_ProfileBoolean, "should be eliminated during IGVN");
3480     break;
3481   }
3482 }
3483 
3484 //------------------------------final_graph_reshaping_walk---------------------
3485 // Replacing Opaque nodes with their input in final_graph_reshaping_impl(),
3486 // requires that the walk visits a node's inputs before visiting the node.
3487 void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
3488   ResourceArea *area = Thread::current()->resource_area();
3489   Unique_Node_List sfpt(area);
3490 
3491   frc._visited.set(root->_idx); // first, mark node as visited
3492   uint cnt = root->req();
3493   Node *n = root;
3494   uint  i = 0;
3495   while (true) {
3496     if (i < cnt) {
3497       // Place all non-visited non-null inputs onto stack
3498       Node* m = n->in(i);
3499       ++i;
3500       if (m != NULL && !frc._visited.test_set(m->_idx)) {
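
The walk itself is an iterative postorder DFS over the in-edges: each node is pushed once (guarded by the _visited set), its inputs are exhausted first, and only then is the node itself processed. A standalone sketch of the same pattern (illustrative, not JDK code):

    #include <cstddef>
    #include <utility>
    #include <vector>

    struct SimpleNode {
      std::vector<SimpleNode*> in;  // input edges; entries may be null
      bool visited = false;
    };

    // Process every node reachable from root, inputs before users, mirroring
    // the explicit-stack loop in final_graph_reshaping_walk().
    template <typename Fn>
    void postorder_walk(SimpleNode* root, Fn process) {
      std::vector<std::pair<SimpleNode*, size_t>> stack; // (node, next input)
      root->visited = true;
      stack.push_back({root, 0});
      while (!stack.empty()) {
        SimpleNode* n = stack.back().first;
        size_t i = stack.back().second;
        if (i < n->in.size()) {
          stack.back().second = i + 1;      // advance to the next input
          SimpleNode* m = n->in[i];
          if (m != nullptr && !m->visited) {
            m->visited = true;              // mark before pushing, like _visited.test_set
            stack.push_back({m, 0});
          }
        } else {
          process(n);                       // all inputs done: visit the node
          stack.pop_back();
        }
      }
    }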