  return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
}

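// Decide whether a load may look through intervening membars when
// searching memory for a dominating store: allowed for non-volatile
// field loads while boxing elimination is active, and for loads from
// stable arrays when FoldStableValues is enabled.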
static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
  if ((atp != NULL) && (atp->index() >= Compile::AliasIdxRaw)) {
    bool non_volatile = (atp->field() != NULL) && !atp->field()->is_volatile();
    bool is_stable_ary = FoldStableValues &&
                         (tp != NULL) && (tp->isa_aryptr() != NULL) &&
                         tp->isa_aryptr()->is_stable();

    return (eliminate_boxing && non_volatile) || is_stable_ary;
  }

  return false;
}

// Is the value loaded previously stored by an arraycopy? If so, return
// a load node that reads from the source array so we may be able to
// optimize out the ArrayCopy node later.
Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const {
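  // ZGC: oop loads are subject to GC load barriers, so conservatively
  // refuse to rewire them to read from the arraycopy source.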
#if INCLUDE_ZGC
  if (UseZGC) {
    if (bottom_type()->make_oopptr() != NULL) {
      return NULL;
    }
  }
#endif

  Node* ld_adr = in(MemNode::Address);
  intptr_t ld_off = 0;
  AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
  Node* ac = find_previous_arraycopy(phase, ld_alloc, st, true);
  if (ac != NULL) {
    assert(ac->is_ArrayCopy(), "what kind of node can this be?");

    Node* mem = ac->in(TypeFunc::Memory);
    Node* ctl = ac->in(0);
    Node* src = ac->in(ArrayCopyNode::Src);

    if (!ac->as_ArrayCopy()->is_clonebasic() && !phase->type(src)->isa_aryptr()) {
      return NULL;
    }

    LoadNode* ld = clone()->as_Load();
    Node* addp = in(MemNode::Address)->clone();
    if (ac->as_ArrayCopy()->is_clonebasic()) {
      assert(ld_alloc != NULL, "need an alloc");
      assert(addp->is_AddP(), "address must be addp");
  // If extra input is TOP ==> the result is TOP
  t = phase->type( in(MemNode::OopStore) );
  if( t == Type::TOP ) return Type::TOP;

  return StoreNode::Value( phase );
}


//=============================================================================
//----------------------------------SCMemProjNode------------------------------
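// SCMemProj is the memory projection of a LoadStore (atomic) node. Its
// type never improves, so Value just returns the node's bottom type.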
const Type* SCMemProjNode::Value(PhaseGVN* phase) const
{
  return bottom_type();
}

//=============================================================================
//----------------------------------LoadStoreNode------------------------------
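// Base class for atomic read-modify-write operations such as
// compare-and-swap and get-and-add: takes control, memory, address and
// value inputs, and produces a result of type _type.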
LoadStoreNode::LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required )
  : Node(required),
    _type(rt),
    _adr_type(at)
{
  init_req(MemNode::Control, c  );
  init_req(MemNode::Memory , mem);
  init_req(MemNode::Address, adr);
  init_req(MemNode::ValueIn, val);
  init_class_id(Class_LoadStore);
}

uint LoadStoreNode::ideal_reg() const {
  return _type->ideal_reg();
}

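// Returns true if the only uses of this LoadStore are SCMemProj memory
// projections, i.e. the loaded result value itself is never consumed.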
bool LoadStoreNode::result_not_used() const {
  for( DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++ ) {
    Node *x = fast_out(i);
    if (x->Opcode() == Op_SCMemProj) continue;
    return false;
  }
  return true;
}
  if (trailing_store() || trailing_load_store()) {
    MemBarNode* leading = leading_membar();
    if (leading != NULL) {
      assert(leading->trailing_membar() == this, "inconsistent leading/trailing membars");
      leading->remove(igvn);
    }
  }
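  // Splice the membar out of the graph: users of its memory and control
  // projections are rewired to the membar's own inputs.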
  igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
  igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Strip out
// control copies
Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape)) return this;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top()) {
    return NULL;
  }

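  // ZGC: if the Precedent edge comes out of a LoadBarrier, point it at the
  // barrier's oop input (the underlying load) instead of the barrier result.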
#if INCLUDE_ZGC
  if (UseZGC) {
    if (req() == (Precedent+1) && in(MemBarNode::Precedent)->in(0) != NULL && in(MemBarNode::Precedent)->in(0)->is_LoadBarrier()) {
      Node* load_node = in(MemBarNode::Precedent)->in(0)->in(LoadBarrierNode::Oop);
      set_req(MemBarNode::Precedent, load_node);
      return this;
    }
  }
#endif

  bool progress = false;
  // Eliminate volatile MemBars for scalar replaced objects.
  if (can_reshape && req() == (Precedent+1)) {
    bool eliminate = false;
    int opc = Opcode();
    if ((opc == Op_MemBarAcquire || opc == Op_MemBarVolatile)) {
      // Volatile field loads and stores.
      Node* my_mem = in(MemBarNode::Precedent);
      // The MemBarAcquire may keep an unused LoadNode alive through the Precedent edge
      if ((my_mem != NULL) && (opc == Op_MemBarAcquire) && (my_mem->outcnt() == 1)) {
        // If the Precedent is a DecodeN and its input (a Load) is used at more than one place,
        // replace this Precedent (DecodeN) with the Load instead.
        if ((my_mem->Opcode() == Op_DecodeN) && (my_mem->in(1)->outcnt() > 1)) {
          Node* load_node = my_mem->in(1);
          set_req(MemBarNode::Precedent, load_node);
          phase->is_IterGVN()->_worklist.push(my_mem);
          my_mem = load_node;
        } else {
          assert(my_mem->unique_out() == this, "sanity");