      st->print(", idx=Bot;");
    else if (atp->index() == Compile::AliasIdxTop)
      st->print(", idx=Top;");
    else if (atp->index() == Compile::AliasIdxRaw)
      st->print(", idx=Raw;");
    else {
      ciField* field = atp->field();
      if (field) {
        st->print(", name=");
        field->print_name_on(st);
      }
      st->print(", idx=%d;", atp->index());
    }
  }
}

extern void print_alias_types();

#endif

static bool membar_for_arraycopy_helper(const TypeOopPtr *t_oop, Node* n, PhaseTransform *phase) {
  if (n->is_Proj()) {
    n = n->in(0);
    if (n->is_Call() && n->as_Call()->may_modify(t_oop, phase)) {
      return true;
    }
  }
  return false;
}

static bool membar_for_arraycopy(const TypeOopPtr *t_oop, MemBarNode* mb, PhaseTransform *phase) {
  Node* mem = mb->in(TypeFunc::Memory);

  if (mem->is_MergeMem()) {
    Node* n = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
    if (membar_for_arraycopy_helper(t_oop, n, phase)) {
      return true;
    } else if (n->is_Phi()) {
      for (uint i = 1; i < n->req(); i++) {
        if (n->in(i) != NULL) {
          if (membar_for_arraycopy_helper(t_oop, n->in(i), phase)) {
            return true;
          }
        }
      }
    }
  }

  return false;
}
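
// Illustrative shape the two helpers above look for (a sketch, not from the
// original source): starting at a MemBar's memory input, they check whether
// the raw memory slice comes from the projection of a call (for example an
// ArrayCopy) that may modify t_oop:
//
//   Call/ArrayCopy (may_modify(t_oop))
//        |
//   Proj(TypeFunc::Memory)        <- membar_for_arraycopy_helper matches this
//        |
//   MergeMem(AliasIdxRaw slice)   <- possibly through a Phi of such inputs
//        |
//   MemBar
//
// When such a call is found, the membar cannot be skipped while walking the
// memory chain for t_oop.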

Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase) {
  assert((t_oop != NULL), "sanity");
  bool is_instance = t_oop->is_known_instance_field();
  bool is_boxed_value_load = t_oop->is_ptr_to_boxed_value() &&
                             (load != NULL) && load->is_Load() &&
                             (phase->is_IterGVN() != NULL);
  if (!(is_instance || is_boxed_value_load))
    return mchain;  // don't try to optimize non-instance types
  uint instance_id = t_oop->instance_id();
  Node *start_mem = phase->C->start()->proj_out(TypeFunc::Memory);
  Node *prev = NULL;
  Node *result = mchain;
  while (prev != result) {
    prev = result;
    if (result == start_mem)
      break;  // hit one of our sentinels
    // skip over a call which does not affect this memory slice
    if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
      Node *proj_in = result->in(0);
      if (proj_in->is_Allocate() && proj_in->_idx == instance_id) {
        break;  // hit one of our sentinels
      } else if (proj_in->is_Call()) {
        CallNode *call = proj_in->as_Call();
        if (!call->may_modify(t_oop, phase)) { // returns false for instances
          result = call->in(TypeFunc::Memory);
        }
      } else if (proj_in->is_Initialize()) {
        AllocateNode* alloc = proj_in->as_Initialize()->allocation();
        // Stop if this is the initialization for the object instance which
        // contains this memory slice, otherwise skip over it.
        if ((alloc == NULL) || (alloc->_idx == instance_id)) {
          break;
        }
        if (is_instance) {
          result = proj_in->in(TypeFunc::Memory);
        } else if (is_boxed_value_load) {
          Node* klass = alloc->in(AllocateNode::KlassNode);
          const TypeKlassPtr* tklass = phase->type(klass)->is_klassptr();
          if (tklass->klass_is_exact() && !tklass->klass()->equals(t_oop->klass())) {
            result = proj_in->in(TypeFunc::Memory); // unrelated allocation
          }
        }
      } else if (proj_in->is_MemBar()) {
        if (membar_for_arraycopy(t_oop, proj_in->as_MemBar(), phase)) {
          break;
        }
        result = proj_in->in(TypeFunc::Memory);
      } else {
        assert(false, "unexpected projection");
      }
    } else if (result->is_ClearArray()) {
      if (!is_instance || !ClearArrayNode::step_through(&result, instance_id, phase)) {
        // Cannot bypass initialization of the instance
        // we are looking for.
        break;
      }
      // Otherwise skip it (the call updated 'result').
    } else if (result->is_MergeMem()) {
      result = step_through_mergemem(phase, result->as_MergeMem(), t_oop, NULL, tty);
    }
  }
  return result;
}
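
// Illustrative effect of the walk above (assumed Java shape, for exposition
// only):
//
//   Point p = new Point();  // known unique instance; its id == instance_id
//   q.x = 5;                // store into some other object
//   int v = p.x;            // load whose memory chain starts after q.x
//
// Walking 'result' backwards, the loop steps past memory projections of
// calls that cannot modify p's slice and past Initialize nodes of unrelated
// allocations, and stops at p's own Allocate/Initialize (or at start memory),
// giving the load the shortest memory chain that still dominates it.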

// ...

        assert(alloc != NULL && alloc->initialization()->is_complete_with_arraycopy(), "broken allocation");
        if (alloc == ld_alloc) {
          return ac;
        }
      }
    }
  } else if (mem->is_Proj() && mem->in(0) != NULL && mem->in(0)->is_ArrayCopy()) {
    ArrayCopyNode* ac = mem->in(0)->as_ArrayCopy();

    if (ac->is_arraycopy_validated() ||
        ac->is_copyof_validated() ||
        ac->is_copyofrange_validated()) {
      Node* ld_addp = in(MemNode::Address);
      if (ld_addp->is_AddP()) {
        Node* ld_base = ld_addp->in(AddPNode::Address);
        Node* ld_offs = ld_addp->in(AddPNode::Offset);

        Node* dest = ac->in(ArrayCopyNode::Dest);

        if (dest == ld_base) {
          Node* src_pos = ac->in(ArrayCopyNode::SrcPos);
          Node* dest_pos = ac->in(ArrayCopyNode::DestPos);
          Node* len = ac->in(ArrayCopyNode::Length);

          const TypeInt *dest_pos_t = phase->type(dest_pos)->isa_int();
          const TypeX *ld_offs_t = phase->type(ld_offs)->isa_intptr_t();
          const TypeInt *len_t = phase->type(len)->isa_int();
          const TypeAryPtr* ary_t = phase->type(dest)->isa_aryptr();

          if (dest_pos_t != NULL && ld_offs_t != NULL && len_t != NULL && ary_t != NULL) {
            BasicType ary_elem = ary_t->klass()->as_array_klass()->element_type()->basic_type();
            uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
            uint elemsize = type2aelembytes(ary_elem);

            intptr_t dest_pos_plus_len_lo = (((intptr_t)dest_pos_t->_lo) + len_t->_lo) * elemsize + header;
            intptr_t dest_pos_plus_len_hi = (((intptr_t)dest_pos_t->_hi) + len_t->_hi) * elemsize + header;
            intptr_t dest_pos_lo = ((intptr_t)dest_pos_t->_lo) * elemsize + header;
            intptr_t dest_pos_hi = ((intptr_t)dest_pos_t->_hi) * elemsize + header;

            if (can_see_stored_value) {
              if (ld_offs_t->_lo >= dest_pos_hi && ld_offs_t->_hi < dest_pos_plus_len_lo) {
                return ac;
              }
            } else {
              if (ld_offs_t->_hi < dest_pos_lo || ld_offs_t->_lo >= dest_pos_plus_len_hi) {
                mem = ac->in(TypeFunc::Memory);
              }
              return ac;
            }
          }
        }
      }
    }
  }
  return NULL;
}

// The logic for reordering loads and stores uses four steps:
// (a) Walk carefully past stores and initializations which we
//     can prove are independent of this load.
// (b) Observe that the next memory state makes an exact match
//     with self (load or store), and locate the relevant store.
// (c) Ensure that, if we were to wire self directly to the store,
//     the optimizer would fold it up somehow.
// (d) Do the rewiring, and return, depending on some other part of
//     the optimizer to fold up the load.
// This routine handles steps (a) and (b).  Steps (c) and (d) are
// specific to loads and stores, so they are handled by the callers.
// (Currently, only LoadNode::Ideal has steps (c), (d).  More later.)
//
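// Illustrative run of the four steps (assumed Java shape, for exposition
// only):
//
//   p.x = 17;     // (b) the exact match for the load below
//   q.y = 42;     // (a) provably independent store, walked past
//   int v = p.x;  // (c)/(d) the caller wires this load to the store of 17,
//                 //         and the optimizer folds v to 17
//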
// ...

        return mem;  // let caller handle steps (c), (d)
      }

    } else if (find_previous_arraycopy(phase, alloc, mem, false) != NULL) {
      if (prev != mem) {
        // Found an arraycopy, but it doesn't affect this load
        continue;
      }
      // Found an arraycopy that may affect this load
      return mem;
    } else if (addr_t != NULL && addr_t->is_known_instance_field()) {
      // Can't use optimize_simple_memory_chain() since it needs PhaseGVN.
      if (mem->is_Proj() && mem->in(0)->is_Call()) {
        // ArrayCopyNodes processed here as well.
        CallNode *call = mem->in(0)->as_Call();
        if (!call->may_modify(addr_t, phase)) {
          mem = call->in(TypeFunc::Memory);
          continue;  // (a) advance through independent call memory
        }
      } else if (mem->is_Proj() && mem->in(0)->is_MemBar()) {
        if (membar_for_arraycopy(addr_t, mem->in(0)->as_MemBar(), phase)) {
          break;
        }
        mem = mem->in(0)->in(TypeFunc::Memory);
        continue;  // (a) advance through independent MemBar memory
      } else if (mem->is_ClearArray()) {
        if (ClearArrayNode::step_through(&mem, (uint)addr_t->instance_id(), phase)) {
          // (the call updated 'mem' value)
          continue;  // (a) advance through independent allocation memory
        } else {
          // Cannot bypass initialization of the instance
          // we are looking for.
          return mem;
        }
      } else if (mem->is_MergeMem()) {
        int alias_idx = phase->C->get_alias_index(adr_type());
        mem = mem->as_MergeMem()->memory_at(alias_idx);
        continue;  // (a) advance through independent MergeMem memory
      }
    }

// ...

  // unroll addition of interesting fields
  return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
}

static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
  if ((atp != NULL) && (atp->index() >= Compile::AliasIdxRaw)) {
    bool non_volatile = (atp->field() != NULL) && !atp->field()->is_volatile();
    bool is_stable_ary = FoldStableValues &&
                         (tp != NULL) && (tp->isa_aryptr() != NULL) &&
                         tp->isa_aryptr()->is_stable();

    return (eliminate_boxing && non_volatile) || is_stable_ary;
  }

  return false;
}
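
// Illustrative cases for skip_through_membars (assumed Java shapes, for
// exposition only):
//
//   Integer i = Integer.valueOf(42);
//   int v = i.intValue();   // non-volatile field of a boxing object: with
//                           // boxing elimination enabled, the membars may
//                           // be skipped
//
//   @Stable int[] a = ...;  // with FoldStableValues, a load from a stable
//   int w = a[0];           // array may likewise skip through membars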

// Is the value loaded previously stored by an arraycopy? If so, return
// a load node that reads from the source array, so we may be able to
// optimize out the ArrayCopy node later.
Node* MemNode::can_see_arraycopy_value(Node* st, PhaseTransform* phase) const {
  Node* ld_adr = in(MemNode::Address);
  intptr_t ld_off = 0;
  AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
  Node* ac = find_previous_arraycopy(phase, ld_alloc, st, true);
  if (ac != NULL) {
    assert(ac->is_ArrayCopy(), "what kind of node can this be?");
    assert(is_Load(), "only for loads");

    if (ac->as_ArrayCopy()->is_clonebasic()) {
      assert(ld_alloc != NULL, "need an alloc");
      Node* ld = clone();
      Node* addp = in(MemNode::Address)->clone();
      assert(addp->is_AddP(), "address must be addp");
      assert(addp->in(AddPNode::Base) == ac->in(ArrayCopyNode::Dest)->in(AddPNode::Base), "strange pattern");
      assert(addp->in(AddPNode::Address) == ac->in(ArrayCopyNode::Dest)->in(AddPNode::Address), "strange pattern");
      addp->set_req(AddPNode::Base, ac->in(ArrayCopyNode::Src)->in(AddPNode::Base));
      addp->set_req(AddPNode::Address, ac->in(ArrayCopyNode::Src)->in(AddPNode::Address));
      ld->set_req(MemNode::Address, phase->transform(addp));
      if (in(0) != NULL) {
        assert(ld_alloc->in(0) != NULL, "alloc must have control");
        ld->set_req(0, ld_alloc->in(0));
      }
      return ld;
    } else {
      Node* ld = clone();
      Node* addp = in(MemNode::Address)->clone();
      assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
      addp->set_req(AddPNode::Base, ac->in(ArrayCopyNode::Src));
      addp->set_req(AddPNode::Address, ac->in(ArrayCopyNode::Src));

      const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
      BasicType ary_elem = ary_t->klass()->as_array_klass()->element_type()->basic_type();
      uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
      uint shift  = exact_log2(type2aelembytes(ary_elem));

      Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
#ifdef _LP64
      diff = phase->transform(new ConvI2LNode(diff));
#endif
      diff = phase->transform(new LShiftXNode(diff, phase->intcon(shift)));

      Node* offset = phase->transform(new AddXNode(addp->in(AddPNode::Offset), diff));
      addp->set_req(AddPNode::Offset, offset);
      ld->set_req(MemNode::Address, phase->transform(addp));

      if (in(0) != NULL) {
        assert(ac->in(0) != NULL, "alloc must have control");
        ld->set_req(0, ac->in(0));
      }
      return ld;
    }
  }
  return NULL;
}
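
// Illustrative example of the address rewrite above (assumed Java shape, for
// exposition only):
//
//   System.arraycopy(src, src_pos, dst, dst_pos, len);
//   int v = dst[i];  // i known to be within the copied range
//
// The cloned load is redirected at src with its offset adjusted by
// (src_pos - dst_pos) << log2(element size), so it reads
// src[i - dst_pos + src_pos], which holds the same value and no longer
// depends on the copy's destination.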


//---------------------------can_see_stored_value------------------------------
// This routine exists to make sure this set of tests is done the same
// everywhere.  We need to make a coordinated change: first LoadNode::Ideal
// will change the graph shape in a way which makes memory live twice at the
// same time (uses the Oracle model of aliasing), then some
// LoadXNode::Identity will fold things back to the equivalence-class model
// of aliasing.
Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
  Node* ld_adr = in(MemNode::Address);
  intptr_t ld_off = 0;
  AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
  const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
  Compile::AliasType* atp = (tp != NULL) ? phase->C->alias_type(tp) : NULL;
  // This is more general than a load from boxing objects.
  if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) {

//=============================================================================
// A revised version of the same code follows: the arraycopy membar helpers
// move to ArrayCopyNode, the copy-range test is factored into
// ArrayCopyNode::modifies(), and can_see_arraycopy_value() moves to LoadNode.
//=============================================================================

      st->print(", idx=Bot;");
    else if (atp->index() == Compile::AliasIdxTop)
      st->print(", idx=Top;");
    else if (atp->index() == Compile::AliasIdxRaw)
      st->print(", idx=Raw;");
    else {
      ciField* field = atp->field();
      if (field) {
        st->print(", name=");
        field->print_name_on(st);
      }
      st->print(", idx=%d;", atp->index());
    }
  }
}

extern void print_alias_types();

#endif

Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase) {
  assert((t_oop != NULL), "sanity");
  bool is_instance = t_oop->is_known_instance_field();
  bool is_boxed_value_load = t_oop->is_ptr_to_boxed_value() &&
                             (load != NULL) && load->is_Load() &&
                             (phase->is_IterGVN() != NULL);
  if (!(is_instance || is_boxed_value_load))
    return mchain;  // don't try to optimize non-instance types
  uint instance_id = t_oop->instance_id();
  Node *start_mem = phase->C->start()->proj_out(TypeFunc::Memory);
  Node *prev = NULL;
  Node *result = mchain;
  while (prev != result) {
    prev = result;
    if (result == start_mem)
      break;  // hit one of our sentinels
    // skip over a call which does not affect this memory slice
    if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
      Node *proj_in = result->in(0);
      if (proj_in->is_Allocate() && proj_in->_idx == instance_id) {
        break;  // hit one of our sentinels
      } else if (proj_in->is_Call()) {
        CallNode *call = proj_in->as_Call();
        if (!call->may_modify(t_oop, phase)) { // returns false for instances
          result = call->in(TypeFunc::Memory);
        }
      } else if (proj_in->is_Initialize()) {
        AllocateNode* alloc = proj_in->as_Initialize()->allocation();
        // Stop if this is the initialization for the object instance which
        // contains this memory slice, otherwise skip over it.
        if ((alloc == NULL) || (alloc->_idx == instance_id)) {
          break;
        }
        if (is_instance) {
          result = proj_in->in(TypeFunc::Memory);
        } else if (is_boxed_value_load) {
          Node* klass = alloc->in(AllocateNode::KlassNode);
          const TypeKlassPtr* tklass = phase->type(klass)->is_klassptr();
          if (tklass->klass_is_exact() && !tklass->klass()->equals(t_oop->klass())) {
            result = proj_in->in(TypeFunc::Memory); // unrelated allocation
          }
        }
      } else if (proj_in->is_MemBar()) {
        if (ArrayCopyNode::membar_for_arraycopy(t_oop, proj_in->as_MemBar(), phase)) {
          break;
        }
        result = proj_in->in(TypeFunc::Memory);
      } else {
        assert(false, "unexpected projection");
      }
    } else if (result->is_ClearArray()) {
      if (!is_instance || !ClearArrayNode::step_through(&result, instance_id, phase)) {
        // Cannot bypass initialization of the instance
        // we are looking for.
        break;
      }
      // Otherwise skip it (the call updated 'result').
    } else if (result->is_MergeMem()) {
      result = step_through_mergemem(phase, result->as_MergeMem(), t_oop, NULL, tty);
    }
  }
  return result;
}

// ...

        assert(alloc != NULL && alloc->initialization()->is_complete_with_arraycopy(), "broken allocation");
        if (alloc == ld_alloc) {
          return ac;
        }
      }
    }
  } else if (mem->is_Proj() && mem->in(0) != NULL && mem->in(0)->is_ArrayCopy()) {
    ArrayCopyNode* ac = mem->in(0)->as_ArrayCopy();

    if (ac->is_arraycopy_validated() ||
        ac->is_copyof_validated() ||
        ac->is_copyofrange_validated()) {
      Node* ld_addp = in(MemNode::Address);
      if (ld_addp->is_AddP()) {
        Node* ld_base = ld_addp->in(AddPNode::Address);
        Node* ld_offs = ld_addp->in(AddPNode::Offset);

        Node* dest = ac->in(ArrayCopyNode::Dest);

        if (dest == ld_base) {
          const TypeX *ld_offs_t = phase->type(ld_offs)->isa_intptr_t();
          if (ac->modifies(ld_offs_t->_lo, ld_offs_t->_hi, phase, can_see_stored_value)) {
            return ac;
          }
          if (!can_see_stored_value) {
            mem = ac->in(TypeFunc::Memory);
          }
        }
      }
    }
  }
  return NULL;
}
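
// Compared with the earlier copy of this function, the open-coded interval
// arithmetic over DestPos/Length is factored into ArrayCopyNode::modifies():
// given the load's byte-offset range [lo, hi], it answers whether the copy
// certainly writes that range (when can_see_stored_value) or may write it
// (otherwise); when the copy provably leaves the range untouched, 'mem' is
// advanced past the ArrayCopy instead.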

// The logic for reordering loads and stores uses four steps:
// (a) Walk carefully past stores and initializations which we
//     can prove are independent of this load.
// (b) Observe that the next memory state makes an exact match
//     with self (load or store), and locate the relevant store.
// (c) Ensure that, if we were to wire self directly to the store,
//     the optimizer would fold it up somehow.
// (d) Do the rewiring, and return, depending on some other part of
//     the optimizer to fold up the load.
// This routine handles steps (a) and (b).  Steps (c) and (d) are
// specific to loads and stores, so they are handled by the callers.
// (Currently, only LoadNode::Ideal has steps (c), (d).  More later.)
//
// ...

        return mem;  // let caller handle steps (c), (d)
      }

    } else if (find_previous_arraycopy(phase, alloc, mem, false) != NULL) {
      if (prev != mem) {
        // Found an arraycopy, but it doesn't affect this load
        continue;
      }
      // Found an arraycopy that may affect this load
      return mem;
    } else if (addr_t != NULL && addr_t->is_known_instance_field()) {
      // Can't use optimize_simple_memory_chain() since it needs PhaseGVN.
      if (mem->is_Proj() && mem->in(0)->is_Call()) {
        // ArrayCopyNodes processed here as well.
        CallNode *call = mem->in(0)->as_Call();
        if (!call->may_modify(addr_t, phase)) {
          mem = call->in(TypeFunc::Memory);
          continue;  // (a) advance through independent call memory
        }
      } else if (mem->is_Proj() && mem->in(0)->is_MemBar()) {
        if (ArrayCopyNode::membar_for_arraycopy(addr_t, mem->in(0)->as_MemBar(), phase)) {
          break;
        }
        mem = mem->in(0)->in(TypeFunc::Memory);
        continue;  // (a) advance through independent MemBar memory
      } else if (mem->is_ClearArray()) {
        if (ClearArrayNode::step_through(&mem, (uint)addr_t->instance_id(), phase)) {
          // (the call updated 'mem' value)
          continue;  // (a) advance through independent allocation memory
        } else {
          // Cannot bypass initialization of the instance
          // we are looking for.
          return mem;
        }
      } else if (mem->is_MergeMem()) {
        int alias_idx = phase->C->get_alias_index(adr_type());
        mem = mem->as_MergeMem()->memory_at(alias_idx);
        continue;  // (a) advance through independent MergeMem memory
      }
    }

// ...

  // unroll addition of interesting fields
  return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
}

static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
  if ((atp != NULL) && (atp->index() >= Compile::AliasIdxRaw)) {
    bool non_volatile = (atp->field() != NULL) && !atp->field()->is_volatile();
    bool is_stable_ary = FoldStableValues &&
                         (tp != NULL) && (tp->isa_aryptr() != NULL) &&
                         tp->isa_aryptr()->is_stable();

    return (eliminate_boxing && non_volatile) || is_stable_ary;
  }

  return false;
}

// Is the value loaded previously stored by an arraycopy? If so, return
// a load node that reads from the source array, so we may be able to
// optimize out the ArrayCopy node later.
Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseTransform* phase) const {
  Node* ld_adr = in(MemNode::Address);
  intptr_t ld_off = 0;
  AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
  Node* ac = find_previous_arraycopy(phase, ld_alloc, st, true);
  if (ac != NULL) {
    assert(ac->is_ArrayCopy(), "what kind of node can this be?");

    Node* ld = clone();
    if (ac->as_ArrayCopy()->is_clonebasic()) {
      assert(ld_alloc != NULL, "need an alloc");
      Node* addp = in(MemNode::Address)->clone();
      assert(addp->is_AddP(), "address must be addp");
      assert(addp->in(AddPNode::Base) == ac->in(ArrayCopyNode::Dest)->in(AddPNode::Base), "strange pattern");
      assert(addp->in(AddPNode::Address) == ac->in(ArrayCopyNode::Dest)->in(AddPNode::Address), "strange pattern");
      addp->set_req(AddPNode::Base, ac->in(ArrayCopyNode::Src)->in(AddPNode::Base));
      addp->set_req(AddPNode::Address, ac->in(ArrayCopyNode::Src)->in(AddPNode::Address));
      ld->set_req(MemNode::Address, phase->transform(addp));
      if (in(0) != NULL) {
        assert(ld_alloc->in(0) != NULL, "alloc must have control");
        ld->set_req(0, ld_alloc->in(0));
      }
    } else {
      Node* addp = in(MemNode::Address)->clone();
      assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
      addp->set_req(AddPNode::Base, ac->in(ArrayCopyNode::Src));
      addp->set_req(AddPNode::Address, ac->in(ArrayCopyNode::Src));

      const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
      BasicType ary_elem = ary_t->klass()->as_array_klass()->element_type()->basic_type();
      uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
      uint shift  = exact_log2(type2aelembytes(ary_elem));

      Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
#ifdef _LP64
      diff = phase->transform(new ConvI2LNode(diff));
#endif
      diff = phase->transform(new LShiftXNode(diff, phase->intcon(shift)));

      Node* offset = phase->transform(new AddXNode(addp->in(AddPNode::Offset), diff));
      addp->set_req(AddPNode::Offset, offset);
      ld->set_req(MemNode::Address, phase->transform(addp));

      if (in(0) != NULL) {
        assert(ac->in(0) != NULL, "alloc must have control");
        ld->set_req(0, ac->in(0));
      }
    }
    // load depends on the tests that validate the arraycopy
    ld->as_Load()->_depends_only_on_test = Pinned;
    return ld;
  }
  return NULL;
}
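
// The Pinned control dependency set just above matters because the rewritten
// load reads from the arraycopy's source: it is only valid on the path where
// the copy's validity checks (bounds and type) have passed, so the load must
// not be allowed to float above those tests.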


//---------------------------can_see_stored_value------------------------------
// This routine exists to make sure this set of tests is done the same
// everywhere.  We need to make a coordinated change: first LoadNode::Ideal
// will change the graph shape in a way which makes memory live twice at the
// same time (uses the Oracle model of aliasing), then some
// LoadXNode::Identity will fold things back to the equivalence-class model
// of aliasing.
Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
  Node* ld_adr = in(MemNode::Address);
  intptr_t ld_off = 0;
  AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
  const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
  Compile::AliasType* atp = (tp != NULL) ? phase->C->alias_type(tp) : NULL;
  // This is more general than a load from boxing objects.
  if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) {