
src/share/vm/opto/memnode.cpp

2534 const Type *StoreNode::Value( PhaseTransform *phase ) const {
2535   // Either input is TOP ==> the result is TOP
2536   const Type *t1 = phase->type( in(MemNode::Memory) );
2537   if( t1 == Type::TOP ) return Type::TOP;
2538   const Type *t2 = phase->type( in(MemNode::Address) );
2539   if( t2 == Type::TOP ) return Type::TOP;
2540   const Type *t3 = phase->type( in(MemNode::ValueIn) );
2541   if( t3 == Type::TOP ) return Type::TOP;
2542   return Type::MEMORY;
2543 }
2544 
2545 //------------------------------Identity---------------------------------------
2546 // Remove redundant stores:
2547 //   Store(m, p, Load(m, p)) changes to m.
2548 //   Store(Store(m, p, x), p, x) changes to Store(m, p, x).
2549 Node *StoreNode::Identity( PhaseTransform *phase ) {
2550   Node* mem = in(MemNode::Memory);
2551   Node* adr = in(MemNode::Address);
2552   Node* val = in(MemNode::ValueIn);
2553 
2554   // Load then Store?  Then the Store is useless
2555   if (val->is_Load() &&
2556       val->in(MemNode::Address)->eqv_uncast(adr) &&
2557       val->in(MemNode::Memory )->eqv_uncast(mem) &&
2558       val->as_Load()->store_Opcode() == Opcode()) {
2559     return mem;
2560   }
2561 
2562   // Two stores in a row of the same value?
2563   if (mem->is_Store() &&
2564       mem->in(MemNode::Address)->eqv_uncast(adr) &&
2565       mem->in(MemNode::ValueIn)->eqv_uncast(val) &&
2566       mem->Opcode() == Opcode()) {
2567     return mem;
2568   }
2569 
2570   // Store of zero anywhere into a freshly-allocated object?
2571   // Then the store is useless.
2572   // (It must already have been captured by the InitializeNode.)
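  // (e.g. an explicit "this.x = 0" in a constructor merely repeats the
  // zeroing already performed by the allocation)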
2573   if (ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
2574     // a newly allocated object is already all-zeroes everywhere
2575     if (mem->is_Proj() && mem->in(0)->is_Allocate()) {
2576       return mem;
2577     }
2578 
2579     // the store may also apply to zero-bits in an earlier object
2580     Node* prev_mem = find_previous_store(phase);
2581     // Steps (a), (b):  Walk past independent stores to find an exact match.
2582     if (prev_mem != NULL) {
2583       Node* prev_val = can_see_stored_value(prev_mem, phase);
2584       if (prev_val != NULL && phase->eqv(prev_val, val)) {
2585         // prev_val and val might differ by a cast; it would be good
2586         // to keep the more informative of the two.
2587         return mem;
2588       }
2589     }
2590   }
2591 
2592   return this;
2593 }
2594 
2595 //------------------------------match_edge-------------------------------------
2596 // Do we Match on this edge index or not?  Match only address & value
2597 uint StoreNode::match_edge(uint idx) const {
2598   return idx == MemNode::Address || idx == MemNode::ValueIn;
2599 }
2600 
2601 //------------------------------cmp--------------------------------------------
2602 // Do not common stores up together.  They generally have to be split
2603 // back up anyway, so do not bother.
2604 uint StoreNode::cmp( const Node &n ) const {
2605   return (&n == this);          // Always fail except on self
2606 }
2607 
2608 //------------------------------Ideal_masked_input-----------------------------
2609 // Check for a useless mask before a partial-word store
2610 // (StoreB ... (AndI valIn conIa) )
2611 // If (conIa & mask == mask) this simplifies to
2612 // (StoreB ... (valIn) )
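// A minimal sketch (illustrative only, not part of memnode.cpp) of the
// subsumption test behind this rule: if the AND constant keeps every bit
// the narrow store writes, the AND cannot change the stored bits.
static bool mask_subsumes_store(jint conIa, jint store_mask) {
  // store_mask is 0xFF for StoreB, 0xFFFF for StoreC, and so on.
  return (conIa & store_mask) == store_mask;
}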


2651 // For simplicity, we actually check if there are any loads from the
2652 // address stored to, not just for loads of the value stored by this node.
2653 //
2654 bool StoreNode::value_never_loaded( PhaseTransform *phase) const {
2655   Node *adr = in(Address);
2656   const TypeOopPtr *adr_oop = phase->type(adr)->isa_oopptr();
2657   if (adr_oop == NULL)
2658     return false;
2659   if (!adr_oop->is_known_instance_field())
2660     return false; // if not a distinct instance, there may be aliases of the address
2661   for (DUIterator_Fast imax, i = adr->fast_outs(imax); i < imax; i++) {
2662     Node *use = adr->fast_out(i);
2663     int opc = use->Opcode();
2664     if (use->is_Load() || use->is_LoadStore()) {
2665       return false;
2666     }
2667   }
2668   return true;
2669 }
2670 
2671 //=============================================================================
2672 //------------------------------Ideal------------------------------------------
2673 // If the store is from an AND mask that leaves the low bits untouched, then
2674 // we can skip the AND operation.  If the store is from a sign-extension
2675 // (a left shift, then right shift) we can skip both.
2676 Node *StoreBNode::Ideal(PhaseGVN *phase, bool can_reshape){
2677   Node *progress = StoreNode::Ideal_masked_input(phase, 0xFF);
2678   if( progress != NULL ) return progress;
2679 
2680   progress = StoreNode::Ideal_sign_extended_input(phase, 24);
2681   if( progress != NULL ) return progress;
2682 
2683   // Finally check the default case
2684   return StoreNode::Ideal(phase, can_reshape);
2685 }
2686 
2687 //=============================================================================
2688 //------------------------------Ideal------------------------------------------
2689 // If the store is from an AND mask that leaves the low bits untouched, then
2690 // we can skip the AND operation


2763   init_req(MemNode::Control, c  );
2764   init_req(MemNode::Memory , mem);
2765   init_req(MemNode::Address, adr);
2766   init_req(MemNode::ValueIn, val);
2767   init_class_id(Class_LoadStore);
2768 }
2769 
2770 uint LoadStoreNode::ideal_reg() const {
2771   return _type->ideal_reg();
2772 }
2773 
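// The result value (e.g. the boolean from a compare-and-swap) is unused
// when this node's only outputs are the SCMemProj that carries its
// memory state.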
2774 bool LoadStoreNode::result_not_used() const {
2775   for( DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++ ) {
2776     Node *x = fast_out(i);
2777     if (x->Opcode() == Op_SCMemProj) continue;
2778     return false;
2779   }
2780   return true;
2781 }
2782 
2783 uint LoadStoreNode::size_of() const { return sizeof(*this); }
2784 
2785 //=============================================================================
2786 //----------------------------------LoadStoreConditionalNode--------------------
2787 LoadStoreConditionalNode::LoadStoreConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex ) : LoadStoreNode(c, mem, adr, val, NULL, TypeInt::BOOL, 5) {
2788   init_req(ExpectedIn, ex );
2789 }
2790 
2791 //=============================================================================
2792 //-------------------------------adr_type--------------------------------------
2793 // Compute the address type of the memory being cleared (from the base-address input)
2794 const TypePtr* ClearArrayNode::adr_type() const {
2795   Node *adr = in(3);
2796   return MemNode::calculate_adr_type(adr->bottom_type());
2797 }
2798 
2799 //------------------------------match_edge-------------------------------------
2800 // Do we Match on this edge index or not?  Do not match memory
2801 uint ClearArrayNode::match_edge(uint idx) const {
2802   return idx > 1;


2997 uint EncodeISOArrayNode::match_edge(uint idx) const {
2998   return idx == 2 || idx == 3; // EncodeISOArray src (Binary dst len)
2999 }
3000 
3001 //------------------------------Ideal------------------------------------------
3002 // Return a node which is more "ideal" than the current node.  Strip out
3003 // control copies
3004 Node *EncodeISOArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3005   return remove_dead_region(phase, can_reshape) ? this : NULL;
3006 }
3007 
3008 //------------------------------Value------------------------------------------
3009 const Type *EncodeISOArrayNode::Value(PhaseTransform *phase) const {
3010   if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP;
3011   return bottom_type();
3012 }
3013 
3014 //=============================================================================
3015 MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
3016   : MultiNode(TypeFunc::Parms + (precedent == NULL? 0: 1)),
3017     _adr_type(C->get_adr_type(alias_idx))
3018 {
3019   init_class_id(Class_MemBar);
3020   Node* top = C->top();
3021   init_req(TypeFunc::I_O,top);
3022   init_req(TypeFunc::FramePtr,top);
3023   init_req(TypeFunc::ReturnAdr,top);
3024   if (precedent != NULL)
3025     init_req(TypeFunc::Parms, precedent);
3026 }
3027 
3028 //------------------------------cmp--------------------------------------------
3029 uint MemBarNode::hash() const { return NO_HASH; }
3030 uint MemBarNode::cmp( const Node &n ) const {
3031   return (&n == this);          // Always fail except on self
3032 }
3033 
3034 //------------------------------make-------------------------------------------
3035 MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) {
3036   switch (opcode) {
3037   case Op_MemBarAcquire:     return new(C) MemBarAcquireNode(C, atp, pn);
3038   case Op_LoadFence:         return new(C) LoadFenceNode(C, atp, pn);
3039   case Op_MemBarRelease:     return new(C) MemBarReleaseNode(C, atp, pn);
3040   case Op_StoreFence:        return new(C) StoreFenceNode(C, atp, pn);
3041   case Op_MemBarAcquireLock: return new(C) MemBarAcquireLockNode(C, atp, pn);
3042   case Op_MemBarReleaseLock: return new(C) MemBarReleaseLockNode(C, atp, pn);
3043   case Op_MemBarVolatile:    return new(C) MemBarVolatileNode(C, atp, pn);
3044   case Op_MemBarCPUOrder:    return new(C) MemBarCPUOrderNode(C, atp, pn);
3045   case Op_Initialize:        return new(C) InitializeNode(C, atp, pn);
3046   case Op_MemBarStoreStore:  return new(C) MemBarStoreStoreNode(C, atp, pn);
3047   default: ShouldNotReachHere(); return NULL;
3048   }
3049 }
3050 
3051 //------------------------------Ideal------------------------------------------
3052 // Return a node which is more "ideal" than the current node.  Strip out
3053 // control copies
3054 Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3055   if (remove_dead_region(phase, can_reshape)) return this;
3056   // Don't bother trying to transform a dead node
3057   if (in(0) && in(0)->is_top()) {
3058     return NULL;
3059   }
3060 
3061   // Eliminate volatile MemBars for scalar replaced objects.
3062   if (can_reshape && req() == (Precedent+1)) {
3063     bool eliminate = false;
3064     int opc = Opcode();
3065     if ((opc == Op_MemBarAcquire || opc == Op_MemBarVolatile)) {
3066       // Volatile field loads and stores.
3067       Node* my_mem = in(MemBarNode::Precedent);
3068       // The MemBarAcquire may keep an unused LoadNode alive through the Precedent edge
3069       if ((my_mem != NULL) && (opc == Op_MemBarAcquire) && (my_mem->outcnt() == 1)) {
3070         // if the Precedent is a decodeN and its input (a Load) is used at more than one place,


3077         } else {
3078           assert(my_mem->unique_out() == this, "sanity");
3079           del_req(Precedent);
3080           phase->is_IterGVN()->_worklist.push(my_mem); // remove dead node later
3081           my_mem = NULL;
3082         }
3083       }
3084       if (my_mem != NULL && my_mem->is_Mem()) {
3085         const TypeOopPtr* t_oop = my_mem->in(MemNode::Address)->bottom_type()->isa_oopptr();
3086         // Check for scalar replaced object reference.
3087         if( t_oop != NULL && t_oop->is_known_instance_field() &&
3088             t_oop->offset() != Type::OffsetBot &&
3089             t_oop->offset() != Type::OffsetTop) {
3090           eliminate = true;
3091         }
3092       }
3093     } else if (opc == Op_MemBarRelease) {
3094       // Final field stores.
3095       Node* alloc = AllocateNode::Ideal_allocation(in(MemBarNode::Precedent), phase);
3096       if ((alloc != NULL) && alloc->is_Allocate() &&
3097           alloc->as_Allocate()->_is_non_escaping) {
3098         // The allocated object does not escape.
3099         eliminate = true;
3100       }
3101     }
3102     if (eliminate) {
3103       // Replace the MemBar's projections with its inputs.
3104       PhaseIterGVN* igvn = phase->is_IterGVN();
3105       igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
3106       igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
3107       // Must return either the original node (now dead) or a new node
3108       // (Do not return a top here, since that would break the uniqueness of top.)
3109       return new (phase->C) ConINode(TypeInt::ZERO);
3110     }
3111   }
3112   return NULL;
3113 }
3114 
3115 //------------------------------Value------------------------------------------
3116 const Type *MemBarNode::Value( PhaseTransform *phase ) const {
3117   if( !in(0) ) return Type::TOP;
3118   if( phase->type(in(0)) == Type::TOP )
3119     return Type::TOP;
3120   return TypeTuple::MEMBAR;
3121 }
3122 
3123 //------------------------------match------------------------------------------
3124 // Construct projections for memory.
3125 Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
3126   switch (proj->_con) {
3127   case TypeFunc::Control:
3128   case TypeFunc::Memory:
3129     return new (m->C) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
3130   }
3131   ShouldNotReachHere();
3132   return NULL;
3133 }
3134 
3135 //===========================InitializeNode====================================
3136 // SUMMARY:
3137 // This node acts as a memory barrier on raw memory, after some raw stores.
3138 // The 'cooked' oop value feeds from the Initialize, not the Allocation.
3139 // The Initialize can 'capture' suitably constrained stores as raw inits.
3140 // It can coalesce related raw stores into larger units (called 'tiles').
3141 // It can avoid zeroing new storage for memory units which have raw inits.
3142 // At macro-expansion, it is marked 'complete', and does not optimize further.
3143 //
3144 // EXAMPLE:
3145 // The object 'new short[2]' occupies 16 bytes on a 32-bit machine.
3146 //   ctl = incoming control; mem* = incoming memory
3147 // (Note:  A star * on a memory edge denotes I/O and other standard edges.)
3148 // First allocate uninitialized memory and fill in the header:
3149 //   alloc = (Allocate ctl mem* 16 #short[].klass ...)
3150 //   ctl := alloc.Control; mem* := alloc.Memory*
3151 //   rawmem = alloc.Memory; rawoop = alloc.RawAddress
3152 // Then initialize to zero the non-header parts of the raw memory block:




2534 const Type *StoreNode::Value( PhaseTransform *phase ) const {
2535   // Either input is TOP ==> the result is TOP
2536   const Type *t1 = phase->type( in(MemNode::Memory) );
2537   if( t1 == Type::TOP ) return Type::TOP;
2538   const Type *t2 = phase->type( in(MemNode::Address) );
2539   if( t2 == Type::TOP ) return Type::TOP;
2540   const Type *t3 = phase->type( in(MemNode::ValueIn) );
2541   if( t3 == Type::TOP ) return Type::TOP;
2542   return Type::MEMORY;
2543 }
2544 
2545 //------------------------------Identity---------------------------------------
2546 // Remove redundant stores:
2547 //   Store(m, p, Load(m, p)) changes to m.
2548 //   Store(Store(m, p, x), p, x) changes to Store(m, p, x).
2549 Node *StoreNode::Identity( PhaseTransform *phase ) {
2550   Node* mem = in(MemNode::Memory);
2551   Node* adr = in(MemNode::Address);
2552   Node* val = in(MemNode::ValueIn);
2553 
2554   Node* result = this;
2555 
2556   // Load then Store?  Then the Store is useless
2557   if (val->is_Load() &&
2558       val->in(MemNode::Address)->eqv_uncast(adr) &&
2559       val->in(MemNode::Memory )->eqv_uncast(mem) &&
2560       val->as_Load()->store_Opcode() == Opcode()) {
2561     result = mem;
2562   }
2563 
2564   // Two stores in a row of the same value?
2565   if (mem->is_Store() &&
2566       mem->in(MemNode::Address)->eqv_uncast(adr) &&
2567       mem->in(MemNode::ValueIn)->eqv_uncast(val) &&
2568       mem->Opcode() == Opcode()) {
2569     result = mem;
2570   }
2571 
2572   // Store of zero anywhere into a freshly-allocated object?
2573   // Then the store is useless.
2574   // (It must already have been captured by the InitializeNode.)
2575   if (result == this &&
2576       ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
2577     // a newly allocated object is already all-zeroes everywhere
2578     if (mem->is_Proj() && mem->in(0)->is_Allocate()) {
2579       result = mem;
2580     }
2581 
2582     if (result == this) {
2583       // the store may also apply to zero-bits in an earlier object
2584       Node* prev_mem = find_previous_store(phase);
2585       // Steps (a), (b):  Walk past independent stores to find an exact match.
2586       if (prev_mem != NULL) {
2587         Node* prev_val = can_see_stored_value(prev_mem, phase);
2588         if (prev_val != NULL && phase->eqv(prev_val, val)) {
2589           // prev_val and val might differ by a cast; it would be good
2590           // to keep the more informative of the two.
2591           result = mem;
2592         }
2593       }
2594     }
2595   }
2596 
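  // If this store goes away, its trailing membar (if any) is no longer
  // needed; remove the membar pair so IGVN does not leave it orphaned.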
2597   if (result != this && phase->is_IterGVN() != NULL) {
2598     MemBarNode* trailing = trailing_membar();
2599     if (trailing != NULL) {
2600 #ifdef ASSERT
2601       const TypeOopPtr* t_oop = phase->type(in(Address))->isa_oopptr();
2602       assert(t_oop == NULL || t_oop->is_known_instance_field(), "only for non escaping objects");
2603 #endif
2604       PhaseIterGVN* igvn = phase->is_IterGVN();
2605       trailing->remove(igvn);
2606     }
2607   }
2608 
2609   return result;
2610 }
2611 
2612 //------------------------------match_edge-------------------------------------
2613 // Do we Match on this edge index or not?  Match only address & value
2614 uint StoreNode::match_edge(uint idx) const {
2615   return idx == MemNode::Address || idx == MemNode::ValueIn;
2616 }
2617 
2618 //------------------------------cmp--------------------------------------------
2619 // Do not common stores up together.  They generally have to be split
2620 // back up anyway, so do not bother.
2621 uint StoreNode::cmp( const Node &n ) const {
2622   return (&n == this);          // Always fail except on self
2623 }
2624 
2625 //------------------------------Ideal_masked_input-----------------------------
2626 // Check for a useless mask before a partial-word store
2627 // (StoreB ... (AndI valIn conIa) )
2628 // If (conIa & mask == mask) this simplifies to
2629 // (StoreB ... (valIn) )


2668 // For simplicity, we actually check if there are any loads from the
2669 // address stored to, not just for loads of the value stored by this node.
2670 //
2671 bool StoreNode::value_never_loaded( PhaseTransform *phase) const {
2672   Node *adr = in(Address);
2673   const TypeOopPtr *adr_oop = phase->type(adr)->isa_oopptr();
2674   if (adr_oop == NULL)
2675     return false;
2676   if (!adr_oop->is_known_instance_field())
2677     return false; // if not a distinct instance, there may be aliases of the address
2678   for (DUIterator_Fast imax, i = adr->fast_outs(imax); i < imax; i++) {
2679     Node *use = adr->fast_out(i);
2680     int opc = use->Opcode();
2681     if (use->is_Load() || use->is_LoadStore()) {
2682       return false;
2683     }
2684   }
2685   return true;
2686 }
2687 
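// For a release store, find the paired trailing membar (a MemBarVolatile
// among this store's uses); any other membar user must be a standalone
// barrier.  Returns NULL for non-release stores.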
2688 MemBarNode* StoreNode::trailing_membar() const {
2689   if (is_release()) {
2690     MemBarNode* trailing_mb = NULL;
2691     for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
2692       Node* u = fast_out(i);
2693       if (u->is_MemBar()) {
2694         if (u->as_MemBar()->trailing_store()) {
2695           assert(u->Opcode() == Op_MemBarVolatile, "");
2696           assert(trailing_mb == NULL, "only one");
2697           trailing_mb = u->as_MemBar();
2698 #ifdef ASSERT
2699           Node* leading = u->as_MemBar()->leading_membar();
2700           assert(leading->Opcode() == Op_MemBarRelease, "incorrect membar");
2701           assert(leading->as_MemBar()->leading_store(), "incorrect membar pair");
2702           assert(leading->as_MemBar()->trailing_membar() == u, "incorrect membar pair");
2703 #endif
2704         } else {
2705           assert(u->as_MemBar()->standalone(), "");
2706         }
2707       }
2708     }
2709     return trailing_mb;
2710   }
2711   return NULL;
2712 }
2713 
2714 //=============================================================================
2715 //------------------------------Ideal------------------------------------------
2716 // If the store is from an AND mask that leaves the low bits untouched, then
2717 // we can skip the AND operation.  If the store is from a sign-extension
2718 // (a left shift, then right shift) we can skip both.
2719 Node *StoreBNode::Ideal(PhaseGVN *phase, bool can_reshape){
2720   Node *progress = StoreNode::Ideal_masked_input(phase, 0xFF);
2721   if( progress != NULL ) return progress;
2722 
2723   progress = StoreNode::Ideal_sign_extended_input(phase, 24);
2724   if( progress != NULL ) return progress;
2725 
2726   // Finally check the default case
2727   return StoreNode::Ideal(phase, can_reshape);
2728 }
2729 
2730 //=============================================================================
2731 //------------------------------Ideal------------------------------------------
2732 // If the store is from an AND mask that leaves the low bits untouched, then
2733 // we can skip the AND operation


2806   init_req(MemNode::Control, c  );
2807   init_req(MemNode::Memory , mem);
2808   init_req(MemNode::Address, adr);
2809   init_req(MemNode::ValueIn, val);
2810   init_class_id(Class_LoadStore);
2811 }
2812 
2813 uint LoadStoreNode::ideal_reg() const {
2814   return _type->ideal_reg();
2815 }
2816 
2817 bool LoadStoreNode::result_not_used() const {
2818   for( DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++ ) {
2819     Node *x = fast_out(i);
2820     if (x->Opcode() == Op_SCMemProj) continue;
2821     return false;
2822   }
2823   return true;
2824 }
2825 
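// Find the trailing MemBarAcquire paired with this atomic load/store,
// if any; other membar users must be standalone barriers.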
2826 MemBarNode* LoadStoreNode::trailing_membar() const {
2827   MemBarNode* trailing = NULL;
2828   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
2829     Node* u = fast_out(i);
2830     if (u->is_MemBar()) {
2831       if (u->as_MemBar()->trailing_load_store()) {
2832         assert(u->Opcode() == Op_MemBarAcquire, "");
2833         assert(trailing == NULL, "only one");
2834         trailing = u->as_MemBar();
2835 #ifdef ASSERT
2836         Node* leading = trailing->leading_membar();
2837         assert(support_IRIW_for_not_multiple_copy_atomic_cpu || leading->Opcode() == Op_MemBarRelease, "incorrect membar");
2838         assert(leading->as_MemBar()->leading_load_store(), "incorrect membar pair");
2839         assert(leading->as_MemBar()->trailing_membar() == trailing, "incorrect membar pair");
2840 #endif
2841       } else {
2842         assert(u->as_MemBar()->standalone(), "wrong barrier kind");
2843       }
2844     }
2845   }
2846 
2847   return trailing;
2848 }
2849 
2850 uint LoadStoreNode::size_of() const { return sizeof(*this); }
2851 
2852 //=============================================================================
2853 //----------------------------------LoadStoreConditionalNode--------------------
2854 LoadStoreConditionalNode::LoadStoreConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex ) : LoadStoreNode(c, mem, adr, val, NULL, TypeInt::BOOL, 5) {
2855   init_req(ExpectedIn, ex );
2856 }
2857 
2858 //=============================================================================
2859 //-------------------------------adr_type--------------------------------------
2860 // Compute the address type of the memory being cleared (from the base-address input)
2861 const TypePtr* ClearArrayNode::adr_type() const {
2862   Node *adr = in(3);
2863   return MemNode::calculate_adr_type(adr->bottom_type());
2864 }
2865 
2866 //------------------------------match_edge-------------------------------------
2867 // Do we Match on this edge index or not?  Do not match memory
2868 uint ClearArrayNode::match_edge(uint idx) const {
2869   return idx > 1;


3064 uint EncodeISOArrayNode::match_edge(uint idx) const {
3065   return idx == 2 || idx == 3; // EncodeISOArray src (Binary dst len)
3066 }
3067 
3068 //------------------------------Ideal------------------------------------------
3069 // Return a node which is more "ideal" than the current node.  Strip out
3070 // control copies
3071 Node *EncodeISOArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3072   return remove_dead_region(phase, can_reshape) ? this : NULL;
3073 }
3074 
3075 //------------------------------Value------------------------------------------
3076 const Type *EncodeISOArrayNode::Value(PhaseTransform *phase) const {
3077   if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP;
3078   return bottom_type();
3079 }
3080 
3081 //=============================================================================
3082 MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
3083   : MultiNode(TypeFunc::Parms + (precedent == NULL? 0: 1)),
3084   _adr_type(C->get_adr_type(alias_idx)), _kind(Standalone)
3085 #ifdef ASSERT
3086   , _pair_idx(0)
3087 #endif
3088 {
3089   init_class_id(Class_MemBar);
3090   Node* top = C->top();
3091   init_req(TypeFunc::I_O,top);
3092   init_req(TypeFunc::FramePtr,top);
3093   init_req(TypeFunc::ReturnAdr,top);
3094   if (precedent != NULL)
3095     init_req(TypeFunc::Parms, precedent);
3096 }
3097 
3098 //------------------------------cmp--------------------------------------------
3099 uint MemBarNode::hash() const { return NO_HASH; }
3100 uint MemBarNode::cmp( const Node &n ) const {
3101   return (&n == this);          // Always fail except on self
3102 }
3103 
3104 //------------------------------make-------------------------------------------
3105 MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) {
3106   switch (opcode) {
3107   case Op_MemBarAcquire:     return new(C) MemBarAcquireNode(C, atp, pn);
3108   case Op_LoadFence:         return new(C) LoadFenceNode(C, atp, pn);
3109   case Op_MemBarRelease:     return new(C) MemBarReleaseNode(C, atp, pn);
3110   case Op_StoreFence:        return new(C) StoreFenceNode(C, atp, pn);
3111   case Op_MemBarAcquireLock: return new(C) MemBarAcquireLockNode(C, atp, pn);
3112   case Op_MemBarReleaseLock: return new(C) MemBarReleaseLockNode(C, atp, pn);
3113   case Op_MemBarVolatile:    return new(C) MemBarVolatileNode(C, atp, pn);
3114   case Op_MemBarCPUOrder:    return new(C) MemBarCPUOrderNode(C, atp, pn);
3115   case Op_Initialize:        return new(C) InitializeNode(C, atp, pn);
3116   case Op_MemBarStoreStore:  return new(C) MemBarStoreStoreNode(C, atp, pn);
3117   default: ShouldNotReachHere(); return NULL;
3118   }
3119 }
3120 
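// Detach this membar: short-circuit its memory and control projections
// to its inputs, removing the paired leading membar first when this is
// a trailing barrier.  A no-op unless the two projections are the only
// uses (outcnt() == 2).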
3121 void MemBarNode::remove(PhaseIterGVN *igvn) {
3122   if (outcnt() != 2) {
3123     return;
3124   }
3125   if (trailing_store() || trailing_load_store()) {
3126     MemBarNode* leading = leading_membar();
3127     if (leading != NULL) {
3128       assert(leading->trailing_membar() == this, "inconsistent leading/trailing membars");
3129       leading->remove(igvn);
3130     }
3131   }
3132   igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
3133   igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
3134 }
3135 
3136 //------------------------------Ideal------------------------------------------
3137 // Return a node which is more "ideal" than the current node.  Strip out
3138 // control copies
3139 Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3140   if (remove_dead_region(phase, can_reshape)) return this;
3141   // Don't bother trying to transform a dead node
3142   if (in(0) && in(0)->is_top()) {
3143     return NULL;
3144   }
3145 
3146   // Eliminate volatile MemBars for scalar replaced objects.
3147   if (can_reshape && req() == (Precedent+1)) {
3148     bool eliminate = false;
3149     int opc = Opcode();
3150     if ((opc == Op_MemBarAcquire || opc == Op_MemBarVolatile)) {
3151       // Volatile field loads and stores.
3152       Node* my_mem = in(MemBarNode::Precedent);
3153       // The MemBarAcquire may keep an unused LoadNode alive through the Precedent edge
3154       if ((my_mem != NULL) && (opc == Op_MemBarAcquire) && (my_mem->outcnt() == 1)) {
3155         // if the Precedent is a decodeN and its input (a Load) is used at more than one place,


3162         } else {
3163           assert(my_mem->unique_out() == this, "sanity");
3164           del_req(Precedent);
3165           phase->is_IterGVN()->_worklist.push(my_mem); // remove dead node later
3166           my_mem = NULL;
3167         }
3168       }
3169       if (my_mem != NULL && my_mem->is_Mem()) {
3170         const TypeOopPtr* t_oop = my_mem->in(MemNode::Address)->bottom_type()->isa_oopptr();
3171         // Check for scalar replaced object reference.
3172         if( t_oop != NULL && t_oop->is_known_instance_field() &&
3173             t_oop->offset() != Type::OffsetBot &&
3174             t_oop->offset() != Type::OffsetTop) {
3175           eliminate = true;
3176         }
3177       }
3178     } else if (opc == Op_MemBarRelease) {
3179       // Final field stores.
3180       Node* alloc = AllocateNode::Ideal_allocation(in(MemBarNode::Precedent), phase);
3181       if ((alloc != NULL) && alloc->is_Allocate() &&
3182           AARCH64_ONLY ( alloc->as_Allocate()->does_not_escape_thread() )
3183           NOT_AARCH64  ( alloc->as_Allocate()->_is_non_escaping )
3184          ) {
3185         // The allocated object does not escape.
3186         eliminate = true;
3187       }
3188     }
3189     if (eliminate) {
3190       // Replace the MemBar's projections with its inputs.
3191       PhaseIterGVN* igvn = phase->is_IterGVN();
3192       remove(igvn);
3193       // Must return either the original node (now dead) or a new node
3194       // (Do not return a top here, since that would break the uniqueness of top.)
3195       return new (phase->C) ConINode(TypeInt::ZERO);
3196     }
3197   }
3198   return NULL;
3199 }
3200 
3201 //------------------------------Value------------------------------------------
3202 const Type *MemBarNode::Value( PhaseTransform *phase ) const {
3203   if( !in(0) ) return Type::TOP;
3204   if( phase->type(in(0)) == Type::TOP )
3205     return Type::TOP;
3206   return TypeTuple::MEMBAR;
3207 }
3208 
3209 //------------------------------match------------------------------------------
3210 // Construct projections for memory.
3211 Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
3212   switch (proj->_con) {
3213   case TypeFunc::Control:
3214   case TypeFunc::Memory:
3215     return new (m->C) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
3216   }
3217   ShouldNotReachHere();
3218   return NULL;
3219 }
3220 
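// Record a leading/trailing membar pairing around a volatile store (and,
// below, around an atomic load/store).  In debug builds both nodes keep
// the leading membar's _idx so the pairing can be verified later.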
3221 void MemBarNode::set_store_pair(MemBarNode* leading, MemBarNode* trailing) {
3222   trailing->_kind = TrailingStore;
3223   leading->_kind = LeadingStore;
3224 #ifdef ASSERT
3225   trailing->_pair_idx = leading->_idx;
3226   leading->_pair_idx = leading->_idx;
3227 #endif
3228 }
3229 
3230 void MemBarNode::set_load_store_pair(MemBarNode* leading, MemBarNode* trailing) {
3231   trailing->_kind = TrailingLoadStore;
3232   leading->_kind = LeadingLoadStore;
3233 #ifdef ASSERT
3234   trailing->_pair_idx = leading->_idx;
3235   leading->_pair_idx = leading->_idx;
3236 #endif
3237 }
3238 
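// From a leading membar, walk forward along CFG successors to the paired
// trailing membar.  A Node_Stack remembers unexplored successors of
// multi-branch nodes, and a VectorSet prevents revisiting nodes on cycles.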
3239 MemBarNode* MemBarNode::trailing_membar() const {
3240   ResourceMark rm;
3241   Node* trailing = (Node*)this;
3242   VectorSet seen(Thread::current()->resource_area());
3243 
3244   Node_Stack multis(0);
3245   do {
3246     Node* c = trailing;
3247     uint i = 0;
3248     do {
3249       trailing = NULL;
3250       for (; i < c->outcnt(); i++) {
3251         Node* next = c->raw_out(i);
3252         if (next != c && next->is_CFG()) {
3253           if (c->is_MultiBranch()) {
3254             if (multis.node() == c) {
3255               multis.set_index(i+1);
3256             } else {
3257               multis.push(c, i+1);
3258             }
3259           }
3260           trailing = next;
3261           break;
3262         }
3263       }
3264       if (trailing != NULL && !seen.test_set(trailing->_idx)) {
3265         break;
3266       }
3267       while (multis.size() > 0) {
3268         c = multis.node();
3269         i = multis.index();
3270         if (i < c->req()) {
3271           break;
3272         }
3273         multis.pop();
3274       }
3275     } while (multis.size() > 0);
3276   } while (!trailing->is_MemBar() || !trailing->as_MemBar()->trailing());
3277 
3278   MemBarNode* mb = trailing->as_MemBar();
3279   assert((mb->_kind == TrailingStore && _kind == LeadingStore) ||
3280          (mb->_kind == TrailingLoadStore && _kind == LeadingLoadStore), "bad trailing membar");
3281   assert(mb->_pair_idx == _pair_idx, "bad trailing membar");
3282   return mb;
3283 }
3284 
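// From a trailing membar, walk control inputs backward to the paired
// leading membar, exploring every input of each Region encountered.
// Returns NULL if the leading membar was already optimized away.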
3285 MemBarNode* MemBarNode::leading_membar() const {
3286   ResourceMark rm;
3287   VectorSet seen(Thread::current()->resource_area());
3288   Node_Stack regions(0);
3289   Node* leading = in(0);
3290   while (leading != NULL && (!leading->is_MemBar() || !leading->as_MemBar()->leading())) {
3291     while (leading == NULL || leading->is_top() || seen.test_set(leading->_idx)) {
3292       leading = NULL;
3293       while (regions.size() > 0 && leading == NULL) {
3294         Node* r = regions.node();
3295         uint i = regions.index();
3296         if (i < r->req()) {
3297           leading = r->in(i);
3298           regions.set_index(i+1);
3299         } else {
3300           regions.pop();
3301         }
3302       }
3303       if (leading == NULL) {
3304         assert(regions.size() == 0, "all paths should have been tried");
3305         return NULL;
3306       }
3307     }
3308     if (leading->is_Region()) {
3309       regions.push(leading, 2);
3310       leading = leading->in(1);
3311     } else {
3312       leading = leading->in(0);
3313     }
3314   }
3315 #ifdef ASSERT
3316   Unique_Node_List wq;
3317   wq.push((Node*)this);
3318   uint found = 0;
3319   for (uint i = 0; i < wq.size(); i++) {
3320     Node* n = wq.at(i);
3321     if (n->is_Region()) {
3322       for (uint j = 1; j < n->req(); j++) {
3323         Node* in = n->in(j);
3324         if (in != NULL && !in->is_top()) {
3325           wq.push(in);
3326         }
3327       }
3328     } else {
3329       if (n->is_MemBar() && n->as_MemBar()->leading()) {
3330         assert(n == leading, "consistency check failed");
3331         found++;
3332       } else {
3333         Node* in = n->in(0);
3334         if (in != NULL && !in->is_top()) {
3335           wq.push(in);
3336         }
3337       }
3338     }
3339   }
3340   assert(found == 1 || (found == 0 && leading == NULL), "consistency check failed");
3341 #endif
3342   if (leading == NULL) {
3343     return NULL;
3344   }
3345   MemBarNode* mb = leading->as_MemBar();
3346   assert((mb->_kind == LeadingStore && _kind == TrailingStore) ||
3347          (mb->_kind == LeadingLoadStore && _kind == TrailingLoadStore), "bad leading membar");
3348   assert(mb->_pair_idx == _pair_idx, "bad leading membar");
3349   return mb;
3350 }
3351 
3352 //===========================InitializeNode====================================
3353 // SUMMARY:
3354 // This node acts as a memory barrier on raw memory, after some raw stores.
3355 // The 'cooked' oop value feeds from the Initialize, not the Allocation.
3356 // The Initialize can 'capture' suitably constrained stores as raw inits.
3357 // It can coalesce related raw stores into larger units (called 'tiles').
3358 // It can avoid zeroing new storage for memory units which have raw inits.
3359 // At macro-expansion, it is marked 'complete', and does not optimize further.
3360 //
3361 // EXAMPLE:
3362 // The object 'new short[2]' occupies 16 bytes on a 32-bit machine.
3363 //   ctl = incoming control; mem* = incoming memory
3364 // (Note:  A star * on a memory edge denotes I/O and other standard edges.)
3365 // First allocate uninitialized memory and fill in the header:
3366 //   alloc = (Allocate ctl mem* 16 #short[].klass ...)
3367 //   ctl := alloc.Control; mem* := alloc.Memory*
3368 //   rawmem = alloc.Memory; rawoop = alloc.RawAddress
3369 // Then initialize to zero the non-header parts of the raw memory block: