27 #include "compiler/compileLog.hpp"
28 #include "memory/allocation.inline.hpp"
29 #include "memory/resourceArea.hpp"
30 #include "oops/objArrayKlass.hpp"
31 #include "opto/addnode.hpp"
32 #include "opto/arraycopynode.hpp"
33 #include "opto/cfgnode.hpp"
34 #include "opto/compile.hpp"
35 #include "opto/connode.hpp"
36 #include "opto/convertnode.hpp"
37 #include "opto/loopnode.hpp"
38 #include "opto/machnode.hpp"
39 #include "opto/matcher.hpp"
40 #include "opto/memnode.hpp"
41 #include "opto/mulnode.hpp"
42 #include "opto/narrowptrnode.hpp"
43 #include "opto/phaseX.hpp"
44 #include "opto/regmask.hpp"
45 #include "utilities/align.hpp"
46 #include "utilities/copy.hpp"
47 #include "utilities/vmError.hpp"
48
49 // Portions of code courtesy of Clifford Click
50
51 // Optimization - Graph Style
52
53 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem, const TypePtr *tp, const TypePtr *adr_check, outputStream *st);
54
55 //=============================================================================
56 uint MemNode::size_of() const { return sizeof(*this); }
57
58 const TypePtr *MemNode::adr_type() const {
59 Node* adr = in(Address);
60 if (adr == NULL) return NULL; // node is dead
61 const TypePtr* cross_check = NULL;
62 DEBUG_ONLY(cross_check = _adr_type);
63 return calculate_adr_type(adr->bottom_type(), cross_check);
64 }
65
66 bool MemNode::check_if_adr_maybe_raw(Node* adr) {
67 if (adr != NULL) {
874 return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
875 }
876
877 static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
878 if ((atp != NULL) && (atp->index() >= Compile::AliasIdxRaw)) {
879 bool non_volatile = (atp->field() != NULL) && !atp->field()->is_volatile();
880 bool is_stable_ary = FoldStableValues &&
881 (tp != NULL) && (tp->isa_aryptr() != NULL) &&
882 tp->isa_aryptr()->is_stable();
883
884 return (eliminate_boxing && non_volatile) || is_stable_ary;
885 }
886
887 return false;
888 }
889
890 // Is the value loaded previously stored by an arraycopy? If so, return
891 // a load node that reads from the source array so we may be able to
892 // optimize out the ArrayCopy node later.
893 Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const {
894 Node* ld_adr = in(MemNode::Address);
895 intptr_t ld_off = 0;
896 AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
897 Node* ac = find_previous_arraycopy(phase, ld_alloc, st, true);
898 if (ac != NULL) {
899 assert(ac->is_ArrayCopy(), "what kind of node can this be?");
900
901 Node* mem = ac->in(TypeFunc::Memory);
902 Node* ctl = ac->in(0);
903 Node* src = ac->in(ArrayCopyNode::Src);
904
905 if (!ac->as_ArrayCopy()->is_clonebasic() && !phase->type(src)->isa_aryptr()) {
906 return NULL;
907 }
908
909 LoadNode* ld = clone()->as_Load();
910 Node* addp = in(MemNode::Address)->clone();
911 if (ac->as_ArrayCopy()->is_clonebasic()) {
912 assert(ld_alloc != NULL, "need an alloc");
913 assert(addp->is_AddP(), "address must be addp");
1557 PhaseIterGVN *igvn = phase->is_IterGVN();
1558 if (igvn != NULL && igvn->_worklist.member(opt_mem)) {
1559 // Delay this transformation until memory Phi is processed.
1560 phase->is_IterGVN()->_worklist.push(this);
1561 return NULL;
1562 }
1563 // Split instance field load through Phi.
1564 Node* result = split_through_phi(phase);
1565 if (result != NULL) return result;
1566
1567 if (t_oop->is_ptr_to_boxed_value()) {
1568 Node* result = eliminate_autobox(phase);
1569 if (result != NULL) return result;
1570 }
1571 }
1572 }
1573
1574 // Is there a dominating load that loads the same value? Leave
1575 // anything that is not a load of a field/array element (like
1576 // barriers etc.) alone
1577 if (in(0) != NULL && adr_type() != TypeRawPtr::BOTTOM && can_reshape) {
1578 for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1579 Node *use = mem->fast_out(i);
1580 if (use != this &&
1581 use->Opcode() == Opcode() &&
1582 use->in(0) != NULL &&
1583 use->in(0) != in(0) &&
1584 use->in(Address) == in(Address)) {
1585 Node* ctl = in(0);
1586 for (int i = 0; i < 10 && ctl != NULL; i++) {
1587 ctl = IfNode::up_one_dom(ctl);
1588 if (ctl == use->in(0)) {
1589 set_req(0, use->in(0));
1590 return this;
1591 }
1592 }
1593 }
1594 }
1595 }
1596
1597 // Check for prior store with a different base or offset; make Load
2950 case Op_MemBarAcquireLock: return new MemBarAcquireLockNode(C, atp, pn);
2951 case Op_MemBarReleaseLock: return new MemBarReleaseLockNode(C, atp, pn);
2952 case Op_MemBarVolatile: return new MemBarVolatileNode(C, atp, pn);
2953 case Op_MemBarCPUOrder: return new MemBarCPUOrderNode(C, atp, pn);
2954 case Op_OnSpinWait: return new OnSpinWaitNode(C, atp, pn);
2955 case Op_Initialize: return new InitializeNode(C, atp, pn);
2956 case Op_MemBarStoreStore: return new MemBarStoreStoreNode(C, atp, pn);
2957 default: ShouldNotReachHere(); return NULL;
2958 }
2959 }
2960
2961 //------------------------------Ideal------------------------------------------
2962 // Return a node which is more "ideal" than the current node. Strip out
2963 // control copies
2964 Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2965 if (remove_dead_region(phase, can_reshape)) return this;
2966 // Don't bother trying to transform a dead node
2967 if (in(0) && in(0)->is_top()) {
2968 return NULL;
2969 }
2970
2971 bool progress = false;
2972 // Eliminate volatile MemBars for scalar replaced objects.
2973 if (can_reshape && req() == (Precedent+1)) {
2974 bool eliminate = false;
2975 int opc = Opcode();
2976 if ((opc == Op_MemBarAcquire || opc == Op_MemBarVolatile)) {
2977 // Volatile field loads and stores.
2978 Node* my_mem = in(MemBarNode::Precedent);
2979 // The MemBarAcquire may keep an unused LoadNode alive through the Precedent edge
2980 if ((my_mem != NULL) && (opc == Op_MemBarAcquire) && (my_mem->outcnt() == 1)) {
2981 // If the Precedent is a DecodeN and its input (a Load) is used at more than one place,
2982 // replace this Precedent (DecodeN) with the Load instead.
2983 if ((my_mem->Opcode() == Op_DecodeN) && (my_mem->in(1)->outcnt() > 1)) {
2984 Node* load_node = my_mem->in(1);
2985 set_req(MemBarNode::Precedent, load_node);
2986 phase->is_IterGVN()->_worklist.push(my_mem);
2987 my_mem = load_node;
2988 } else {
2989 assert(my_mem->unique_out() == this, "sanity");
|
27 #include "compiler/compileLog.hpp"
28 #include "memory/allocation.inline.hpp"
29 #include "memory/resourceArea.hpp"
30 #include "oops/objArrayKlass.hpp"
31 #include "opto/addnode.hpp"
32 #include "opto/arraycopynode.hpp"
33 #include "opto/cfgnode.hpp"
34 #include "opto/compile.hpp"
35 #include "opto/connode.hpp"
36 #include "opto/convertnode.hpp"
37 #include "opto/loopnode.hpp"
38 #include "opto/machnode.hpp"
39 #include "opto/matcher.hpp"
40 #include "opto/memnode.hpp"
41 #include "opto/mulnode.hpp"
42 #include "opto/narrowptrnode.hpp"
43 #include "opto/phaseX.hpp"
44 #include "opto/regmask.hpp"
45 #include "utilities/align.hpp"
46 #include "utilities/copy.hpp"
47 #include "utilities/macros.hpp"
48 #include "utilities/vmError.hpp"
49 #if INCLUDE_ZGC
50 #include "gc/z/c2/zBarrierSetC2.hpp"
51 #endif
52
53 // Portions of code courtesy of Clifford Click
54
55 // Optimization - Graph Style
56
57 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem, const TypePtr *tp, const TypePtr *adr_check, outputStream *st);
58
59 //=============================================================================
60 uint MemNode::size_of() const { return sizeof(*this); }
61
62 const TypePtr *MemNode::adr_type() const {
63 Node* adr = in(Address);
64 if (adr == NULL) return NULL; // node is dead
65 const TypePtr* cross_check = NULL;
66 DEBUG_ONLY(cross_check = _adr_type);
67 return calculate_adr_type(adr->bottom_type(), cross_check);
68 }
69
70 bool MemNode::check_if_adr_maybe_raw(Node* adr) {
71 if (adr != NULL) {
878 return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
879 }
880
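// Whether membars may be stepped through for this access: true when boxing
// elimination is requested and the field involved is non-volatile, or when
// the access is a load from a stable array and FoldStableValues is enabled.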
881 static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
882 if ((atp != NULL) && (atp->index() >= Compile::AliasIdxRaw)) {
883 bool non_volatile = (atp->field() != NULL) && !atp->field()->is_volatile();
884 bool is_stable_ary = FoldStableValues &&
885 (tp != NULL) && (tp->isa_aryptr() != NULL) &&
886 tp->isa_aryptr()->is_stable();
887
888 return (eliminate_boxing && non_volatile) || is_stable_ary;
889 }
890
891 return false;
892 }
893
894 // Is the value loaded previously stored by an arraycopy? If so, return
895 // a load node that reads from the source array so we may be able to
896 // optimize out the ArrayCopy node later.
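// For example (informally): after "int[] b = a.clone(); int v = b[0];" the
// load of b[0] may be redirected to read a[0] instead; once no load depends
// on the copy anymore, the clone's ArrayCopy node can become dead and be
// eliminated.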
897 Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const {
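// ZGC: this transformation is skipped for any load whose type is an oop
// pointer (see the guard below); under ZGC such loads are subject to load
// barriers and are not redirected to the arraycopy source here.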
898 #if INCLUDE_ZGC
899 if (UseZGC) {
900 if (bottom_type()->make_oopptr() != NULL) {
901 return NULL;
902 }
903 }
904 #endif
905
906 Node* ld_adr = in(MemNode::Address);
907 intptr_t ld_off = 0;
908 AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
909 Node* ac = find_previous_arraycopy(phase, ld_alloc, st, true);
910 if (ac != NULL) {
911 assert(ac->is_ArrayCopy(), "what kind of node can this be?");
912
913 Node* mem = ac->in(TypeFunc::Memory);
914 Node* ctl = ac->in(0);
915 Node* src = ac->in(ArrayCopyNode::Src);
916
917 if (!ac->as_ArrayCopy()->is_clonebasic() && !phase->type(src)->isa_aryptr()) {
918 return NULL;
919 }
920
921 LoadNode* ld = clone()->as_Load();
922 Node* addp = in(MemNode::Address)->clone();
923 if (ac->as_ArrayCopy()->is_clonebasic()) {
924 assert(ld_alloc != NULL, "need an alloc");
925 assert(addp->is_AddP(), "address must be addp");
1569 PhaseIterGVN *igvn = phase->is_IterGVN();
1570 if (igvn != NULL && igvn->_worklist.member(opt_mem)) {
1571 // Delay this transformation until memory Phi is processed.
1572 phase->is_IterGVN()->_worklist.push(this);
1573 return NULL;
1574 }
1575 // Split instance field load through Phi.
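// Roughly: the memory input is a Phi over memory states of a known unique
// instance, so split_through_phi() can push the load above the Phi,
// producing a Phi of loads that are each optimized against the
// corresponding memory state.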
1576 Node* result = split_through_phi(phase);
1577 if (result != NULL) return result;
1578
1579 if (t_oop->is_ptr_to_boxed_value()) {
1580 Node* result = eliminate_autobox(phase);
1581 if (result != NULL) return result;
1582 }
1583 }
1584 }
1585
1586 // Is there a dominating load that loads the same value? Leave
1587 // anything that is not a load of a field/array element (like
1588 // barriers etc.) alone
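// The search below walks at most 10 steps up the chain of dominating
// controls of this load (via IfNode::up_one_dom); if it reaches the control
// of an identical load on the same memory and address, this load is moved
// to that control so the two loads can be commoned.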
1589 if (in(0) != NULL && !adr_type()->isa_rawptr() && can_reshape) {
1590 for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
1591 Node *use = mem->fast_out(i);
1592 if (use != this &&
1593 use->Opcode() == Opcode() &&
1594 use->in(0) != NULL &&
1595 use->in(0) != in(0) &&
1596 use->in(Address) == in(Address)) {
1597 Node* ctl = in(0);
1598 for (int i = 0; i < 10 && ctl != NULL; i++) {
1599 ctl = IfNode::up_one_dom(ctl);
1600 if (ctl == use->in(0)) {
1601 set_req(0, use->in(0));
1602 return this;
1603 }
1604 }
1605 }
1606 }
1607 }
1608
1609 // Check for prior store with a different base or offset; make Load
2962 case Op_MemBarAcquireLock: return new MemBarAcquireLockNode(C, atp, pn);
2963 case Op_MemBarReleaseLock: return new MemBarReleaseLockNode(C, atp, pn);
2964 case Op_MemBarVolatile: return new MemBarVolatileNode(C, atp, pn);
2965 case Op_MemBarCPUOrder: return new MemBarCPUOrderNode(C, atp, pn);
2966 case Op_OnSpinWait: return new OnSpinWaitNode(C, atp, pn);
2967 case Op_Initialize: return new InitializeNode(C, atp, pn);
2968 case Op_MemBarStoreStore: return new MemBarStoreStoreNode(C, atp, pn);
2969 default: ShouldNotReachHere(); return NULL;
2970 }
2971 }
2972
2973 //------------------------------Ideal------------------------------------------
2974 // Return a node which is more "ideal" than the current node. Strip out
2975 // control copies
2976 Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2977 if (remove_dead_region(phase, can_reshape)) return this;
2978 // Don't bother trying to transform a dead node
2979 if (in(0) && in(0)->is_top()) {
2980 return NULL;
2981 }
2982
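// ZGC: if the Precedent edge refers to a projection of a LoadBarrier node,
// re-point it at the barrier's Oop input (the underlying load), in the same
// spirit as the DecodeN handling of the Precedent edge further down.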
2983 #if INCLUDE_ZGC
2984 if (UseZGC) {
2985 if (req() == (Precedent+1) && in(MemBarNode::Precedent)->in(0) != NULL && in(MemBarNode::Precedent)->in(0)->is_LoadBarrier()) {
2986 Node* load_node = in(MemBarNode::Precedent)->in(0)->in(LoadBarrierNode::Oop);
2987 set_req(MemBarNode::Precedent, load_node);
2988 return this;
2989 }
2990 }
2991 #endif
2992
2993 bool progress = false;
2994 // Eliminate volatile MemBars for scalar replaced objects.
2995 if (can_reshape && req() == (Precedent+1)) {
2996 bool eliminate = false;
2997 int opc = Opcode();
2998 if ((opc == Op_MemBarAcquire || opc == Op_MemBarVolatile)) {
2999 // Volatile field loads and stores.
3000 Node* my_mem = in(MemBarNode::Precedent);
3001 // The MemBarAcquire may keep an unused LoadNode alive through the Precedent edge
3002 if ((my_mem != NULL) && (opc == Op_MemBarAcquire) && (my_mem->outcnt() == 1)) {
3003 // If the Precedent is a DecodeN and its input (a Load) is used at more than one place,
3004 // replace this Precedent (DecodeN) with the Load instead.
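// After re-pointing the Precedent at the Load, the DecodeN has no remaining
// uses; it is pushed on the IGVN worklist below so it can be removed, while
// the Load stays reachable through the Precedent edge.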
3005 if ((my_mem->Opcode() == Op_DecodeN) && (my_mem->in(1)->outcnt() > 1)) {
3006 Node* load_node = my_mem->in(1);
3007 set_req(MemBarNode::Precedent, load_node);
3008 phase->is_IterGVN()->_worklist.push(my_mem);
3009 my_mem = load_node;
3010 } else {
3011 assert(my_mem->unique_out() == this, "sanity");