src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp

rev 56771 : 8233401: Shenandoah: Refactor/cleanup Shenandoah load barrier code


   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/shared/barrierSet.hpp"

  26 #include "gc/shenandoah/shenandoahForwarding.hpp"
  27 #include "gc/shenandoah/shenandoahHeap.hpp"
  28 #include "gc/shenandoah/shenandoahHeuristics.hpp"
  29 #include "gc/shenandoah/shenandoahRuntime.hpp"
  30 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
  31 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  32 #include "gc/shenandoah/c2/shenandoahSupport.hpp"
  33 #include "opto/arraycopynode.hpp"
  34 #include "opto/escape.hpp"
  35 #include "opto/graphKit.hpp"
  36 #include "opto/idealKit.hpp"
  37 #include "opto/macro.hpp"
  38 #include "opto/movenode.hpp"
  39 #include "opto/narrowptrnode.hpp"
  40 #include "opto/rootnode.hpp"
  41 #include "opto/runtime.hpp"
  42 
  43 ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
  44   return reinterpret_cast<ShenandoahBarrierSetC2*>(BarrierSet::barrier_set()->barrier_set_c2());
  45 }


 517     value = shenandoah_storeval_barrier(kit, value);
 518     val.set_node(value);
 519     shenandoah_write_barrier_pre(kit, true /* do_load */, /*kit->control(),*/ access.base(), adr, adr_idx, val.node(),
 520                                  static_cast<const TypeOopPtr*>(val.type()), NULL /* pre_val */, access.type());
 521   } else {
 522     assert(access.is_opt_access(), "only for optimization passes");
 523     assert(((decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0 || !ShenandoahSATBBarrier) && (decorators & C2_ARRAY_COPY) != 0, "unexpected caller of this code");
 524     C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
 525     PhaseGVN& gvn =  opt_access.gvn();
 526     MergeMemNode* mm = opt_access.mem();
 527 
 528     if (ShenandoahStoreValEnqueueBarrier) {
 529       Node* enqueue = gvn.transform(new ShenandoahEnqueueBarrierNode(val.node()));
 530       val.set_node(enqueue);
 531     }
 532   }
 533   return BarrierSetC2::store_at_resolved(access, val);
 534 }
 535 
 536 Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
 537   DecoratorSet decorators = access.decorators();
 538 
 539   Node* adr = access.addr().node();
 540   Node* obj = access.base();


 541 
 542   bool mismatched = (decorators & C2_MISMATCHED) != 0;
 543   bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
 544   bool on_heap = (decorators & IN_HEAP) != 0;
 545   bool on_weak_ref = (decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF)) != 0;
 546   bool is_unordered = (decorators & MO_UNORDERED) != 0;
 547   bool need_cpu_mem_bar = !is_unordered || mismatched || !on_heap;
 548   bool is_traversal_mode = ShenandoahHeap::heap()->is_traversal_mode();
 549   bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0 || is_traversal_mode;
 550   bool in_native = (decorators & IN_NATIVE) != 0;
 551 
 552   Node* top = Compile::current()->top();
 553 
 554   Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
 555   Node* load = BarrierSetC2::load_at_resolved(access, val_type);
 556 
 557   if (access.is_oop()) {
 558     if (ShenandoahLoadRefBarrier) {
 559       load = new ShenandoahLoadReferenceBarrierNode(NULL, load, in_native && !is_traversal_mode);


 560       if (access.is_parse_access()) {
 561         load = static_cast<C2ParseAccess &>(access).kit()->gvn().transform(load);
 562       } else {
 563         load = static_cast<C2OptAccess &>(access).gvn().transform(load);
 564       }
 565     }
 566   }











 567 
 568   // If we are reading the value of the referent field of a Reference
 569   // object (either by using Unsafe directly or through reflection)
 570   // then, if SATB is enabled, we need to record the referent in an
 571   // SATB log buffer using the pre-barrier mechanism.
 572   // Also we need to add memory barrier to prevent commoning reads
 573   // from this field across safepoint since GC can change its value.
 574   bool need_read_barrier = ShenandoahKeepAliveBarrier &&
 575     (on_weak_ref || (unknown && offset != top && obj != top));
 576 
 577   if (!access.is_oop() || !need_read_barrier) {
 578     return load;
 579   }
 580 
 581   assert(access.is_parse_access(), "entry not supported at optimization time");
 582   C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
 583   GraphKit* kit = parse_access.kit();



 584 
 585   if (on_weak_ref && keep_alive) {
 586     // Use the pre-barrier to record the value in the referent field
 587     satb_write_barrier_pre(kit, false /* do_load */,
 588                            NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
 589                            load /* pre_val */, T_OBJECT);
 590     // Add memory barrier to prevent commoning reads from this field
 591     // across safepoint since GC can change its value.
 592     kit->insert_mem_bar(Op_MemBarCPUOrder);
 593   } else if (unknown) {
 594     // We do not require a mem bar inside pre_barrier if need_mem_bar
 595     // is set: the barriers would be emitted by us.
 596     insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);

 597   }
 598 
 599   return load;
 600 }
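
For comparison with the refactored version further below: the pre-change code above selects the "native" flavor of the load-reference barrier with in_native && !is_traversal_mode, while the refactored load_at_resolved ties it to ShenandoahConcurrentRoots::can_do_concurrent_roots(). A minimal standalone sketch of that decision change follows; the helper names and the driver are illustrative only, not HotSpot code.

#include <iostream>

// Pre-change rule: a native (IN_NATIVE) oop load gets the native LRB
// unless the heap runs in traversal mode.
static bool native_lrb_before(bool in_native, bool is_traversal_mode) {
  return in_native && !is_traversal_mode;
}

// Refactored rule: the native LRB is used only when concurrent root
// processing is possible (ShenandoahConcurrentRoots::can_do_concurrent_roots()
// in the real code).
static bool native_lrb_after(bool in_native, bool can_do_concurrent_roots) {
  return in_native && can_do_concurrent_roots;
}

int main() {
  // Native load, traversal mode off, concurrent roots unavailable:
  // the old rule picks the native barrier, the new rule does not.
  std::cout << native_lrb_before(true, false) << " "
            << native_lrb_after(true, false) << std::endl;   // prints "1 0"
  return 0;
}
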
 601 
 602 Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
 603                                                    Node* new_val, const Type* value_type) const {
 604   GraphKit* kit = access.kit();
 605   if (access.is_oop()) {
 606     new_val = shenandoah_storeval_barrier(kit, new_val);
 607     shenandoah_write_barrier_pre(kit, false /* do_load */,
 608                                  NULL, NULL, max_juint, NULL, NULL,
 609                                  expected_val /* pre_val */, T_OBJECT);
 610 
 611     MemNode::MemOrd mo = access.mem_node_mo();
 612     Node* mem = access.memory();
 613     Node* adr = access.addr().node();
 614     const TypePtr* adr_type = access.addr().type();
 615     Node* load_store = NULL;
 616 




   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/shared/barrierSet.hpp"
  26 #include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
  27 #include "gc/shenandoah/shenandoahForwarding.hpp"
  28 #include "gc/shenandoah/shenandoahHeap.hpp"
  29 #include "gc/shenandoah/shenandoahHeuristics.hpp"
  30 #include "gc/shenandoah/shenandoahRuntime.hpp"
  31 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
  32 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  33 #include "gc/shenandoah/c2/shenandoahSupport.hpp"
  34 #include "opto/arraycopynode.hpp"
  35 #include "opto/escape.hpp"
  36 #include "opto/graphKit.hpp"
  37 #include "opto/idealKit.hpp"
  38 #include "opto/macro.hpp"
  39 #include "opto/movenode.hpp"
  40 #include "opto/narrowptrnode.hpp"
  41 #include "opto/rootnode.hpp"
  42 #include "opto/runtime.hpp"
  43 
  44 ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
  45   return reinterpret_cast<ShenandoahBarrierSetC2*>(BarrierSet::barrier_set()->barrier_set_c2());
  46 }


 518     value = shenandoah_storeval_barrier(kit, value);
 519     val.set_node(value);
 520     shenandoah_write_barrier_pre(kit, true /* do_load */, /*kit->control(),*/ access.base(), adr, adr_idx, val.node(),
 521                                  static_cast<const TypeOopPtr*>(val.type()), NULL /* pre_val */, access.type());
 522   } else {
 523     assert(access.is_opt_access(), "only for optimization passes");
 524     assert(((decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0 || !ShenandoahSATBBarrier) && (decorators & C2_ARRAY_COPY) != 0, "unexpected caller of this code");
 525     C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
 526     PhaseGVN& gvn =  opt_access.gvn();
 527     MergeMemNode* mm = opt_access.mem();
 528 
 529     if (ShenandoahStoreValEnqueueBarrier) {
 530       Node* enqueue = gvn.transform(new ShenandoahEnqueueBarrierNode(val.node()));
 531       val.set_node(enqueue);
 532     }
 533   }
 534   return BarrierSetC2::store_at_resolved(access, val);
 535 }
 536 
 537 Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
 538   // 1: load reference
 539   Node* load = BarrierSetC2::load_at_resolved(access, val_type);
 540   // For non-reference loads, no additional barrier is needed
 541   if (!access.is_oop()) {
 542     return load;
 543   }
 544 
 545   DecoratorSet decorators = access.decorators();







 546   bool in_native = (decorators & IN_NATIVE) != 0;
 547 
 548   // 2: apply LRB if ShenandoahLoadRefBarrier is set





 549   if (ShenandoahLoadRefBarrier) {
 550     // Native barrier is for concurrent root processing
 551     bool use_native_barrier = in_native && ShenandoahConcurrentRoots::can_do_concurrent_roots();
 552     load = new ShenandoahLoadReferenceBarrierNode(NULL, load, use_native_barrier);
 553     if (access.is_parse_access()) {
 554       load = static_cast<C2ParseAccess &>(access).kit()->gvn().transform(load);
 555     } else {
 556       load = static_cast<C2OptAccess &>(access).gvn().transform(load);
 557     }
 558   }
 559 
 560   // 3: apply keep-alive barrier if ShenandoahKeepAliveBarrier is set
 561   if (ShenandoahKeepAliveBarrier) {
 562     Node* top = Compile::current()->top();
 563     Node* adr = access.addr().node();
 564     Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
 565     Node* obj = access.base();
 566 
 567     bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
 568     bool on_weak_ref = (decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF)) != 0;
 569     bool is_traversal_mode = ShenandoahHeap::heap()->is_traversal_mode();
 570     bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0 || is_traversal_mode;
 571 
 572     // If we are reading the value of the referent field of a Reference
 573     // object (either by using Unsafe directly or through reflection)
 574     // then, if SATB is enabled, we need to record the referent in an
 575     // SATB log buffer using the pre-barrier mechanism.
 576     // Also we need to add memory barrier to prevent commoning reads
 577     // from this field across safepoint since GC can change its value.
 578     if (!on_weak_ref || (unknown && (offset == top || obj == top)) || !keep_alive) {



 579       return load;
 580     }
 581 
 582     assert(access.is_parse_access(), "entry not supported at optimization time");
 583     C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
 584     GraphKit* kit = parse_access.kit();
 585     bool mismatched = (decorators & C2_MISMATCHED) != 0;
 586     bool is_unordered = (decorators & MO_UNORDERED) != 0;
 587     bool need_cpu_mem_bar = !is_unordered || mismatched || in_native;
 588 
 589     if (on_weak_ref) {
 590       // Use the pre-barrier to record the value in the referent field
 591       satb_write_barrier_pre(kit, false /* do_load */,
 592                              NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
 593                              load /* pre_val */, T_OBJECT);
 594       // Add memory barrier to prevent commoning reads from this field
 595       // across safepoint since GC can change its value.
 596       kit->insert_mem_bar(Op_MemBarCPUOrder);
 597     } else if (unknown) {
 598       // We do not require a mem bar inside pre_barrier if need_mem_bar
 599       // is set: the barriers would be emitted by us.
 600       insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
 601     }
 602   }
 603 
 604   return load;
 605 }
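
The early-exit check in step 3 above folds the old need_read_barrier and keep_alive tests into a single condition. A condensed standalone model of that condition follows; the decorator bit values and the driver are placeholders, not the real DecoratorSet definitions.

#include <cstdint>
#include <iostream>

// Placeholder decorator bits, for illustration only.
typedef uint64_t DecoratorSet;
const DecoratorSet ON_WEAK_OOP_REF    = 1u << 0;
const DecoratorSet ON_PHANTOM_OOP_REF = 1u << 1;
const DecoratorSet ON_UNKNOWN_OOP_REF = 1u << 2;
const DecoratorSet AS_NO_KEEPALIVE    = 1u << 3;

// Mirrors the refactored check: skip the keep-alive (SATB pre-) barrier
// unless the load targets a weak/phantom referent with keep-alive semantics
// and, for "unknown" reference loads, both base object and offset are usable.
static bool skip_keep_alive_barrier(DecoratorSet d, bool offset_is_top,
                                    bool obj_is_top, bool is_traversal_mode) {
  bool on_weak_ref = (d & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF)) != 0;
  bool unknown     = (d & ON_UNKNOWN_OOP_REF) != 0;
  bool keep_alive  = (d & AS_NO_KEEPALIVE) == 0 || is_traversal_mode;
  return !on_weak_ref || (unknown && (offset_is_top || obj_is_top)) || !keep_alive;
}

int main() {
  // Weak referent load with keep-alive: barrier is emitted (prints 0).
  std::cout << skip_keep_alive_barrier(ON_WEAK_OOP_REF, false, false, false) << "\n";
  // Same load marked AS_NO_KEEPALIVE outside traversal mode: skipped (prints 1).
  std::cout << skip_keep_alive_barrier(ON_WEAK_OOP_REF | AS_NO_KEEPALIVE,
                                       false, false, false) << "\n";
  return 0;
}
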
 606 
 607 Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
 608                                                    Node* new_val, const Type* value_type) const {
 609   GraphKit* kit = access.kit();
 610   if (access.is_oop()) {
 611     new_val = shenandoah_storeval_barrier(kit, new_val);
 612     shenandoah_write_barrier_pre(kit, false /* do_load */,
 613                                  NULL, NULL, max_juint, NULL, NULL,
 614                                  expected_val /* pre_val */, T_OBJECT);
 615 
 616     MemNode::MemOrd mo = access.mem_node_mo();
 617     Node* mem = access.memory();
 618     Node* adr = access.addr().node();
 619     const TypePtr* adr_type = access.addr().type();
 620     Node* load_store = NULL;
 621 

