src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp

rev 56771 : 8233339: Shenandoah: Centralize load barrier decisions into ShenandoahBarrierSet
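
For orientation before the two listings: the point of this change is that the new revision of load_at_resolved no longer computes barrier decisions inline, but asks ShenandoahBarrierSet through three predicates. The new code also asserts that AS_RAW and AS_NO_KEEPALIVE accesses never reach this path, which lets the old keep_alive/traversal special case disappear. Below is a minimal sketch of those predicates: their names and (DecoratorSet, BasicType) signatures are taken from the call sites visible in the new revision, but the bodies are only reconstructed from the inline checks the old revision used and may differ from the patch's actual implementation.

  // Sketch only (not the patch's verbatim code): signatures come from the call
  // sites in the new revision below; bodies are reconstructed from the inline
  // checks the old revision used.
  bool ShenandoahBarrierSet::need_load_reference_barrier(DecoratorSet decorators, BasicType type) {
    // The old code wrapped only oop loads in an LRB, under ShenandoahLoadRefBarrier.
    return ShenandoahLoadRefBarrier && (type == T_OBJECT || type == T_ARRAY);
  }

  bool ShenandoahBarrierSet::use_native_load_reference_barrier(DecoratorSet decorators, BasicType type) {
    // The old code passed 'in_native && !is_traversal_mode' as the LRB's native flag.
    return (decorators & IN_NATIVE) != 0 && !ShenandoahHeap::heap()->is_traversal_mode();
  }

  bool ShenandoahBarrierSet::need_keep_alive_barrier(DecoratorSet decorators, BasicType type) {
    // The old code tested ShenandoahKeepAliveBarrier && (on_weak_ref || unknown);
    // the 'offset != top && obj != top' refinement for the unknown case stays in C2.
    if (!ShenandoahKeepAliveBarrier || (type != T_OBJECT && type != T_ARRAY)) {
      return false;
    }
    return (decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF)) != 0;
  }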

Old:

 518     val.set_node(value);
 519     shenandoah_write_barrier_pre(kit, true /* do_load */, /*kit->control(),*/ access.base(), adr, adr_idx, val.node(),
 520                                  static_cast<const TypeOopPtr*>(val.type()), NULL /* pre_val */, access.type());
 521   } else {
 522     assert(access.is_opt_access(), "only for optimization passes");
 523     assert(((decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0 || !ShenandoahSATBBarrier) && (decorators & C2_ARRAY_COPY) != 0, "unexpected caller of this code");
 524     C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
 525     PhaseGVN& gvn =  opt_access.gvn();
 526     MergeMemNode* mm = opt_access.mem();
 527 
 528     if (ShenandoahStoreValEnqueueBarrier) {
 529       Node* enqueue = gvn.transform(new ShenandoahEnqueueBarrierNode(val.node()));
 530       val.set_node(enqueue);
 531     }
 532   }
 533   return BarrierSetC2::store_at_resolved(access, val);
 534 }
 535 
 536 Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
 537   DecoratorSet decorators = access.decorators();
 538 
 539   Node* adr = access.addr().node();
 540   Node* obj = access.base();
 541 
 542   bool mismatched = (decorators & C2_MISMATCHED) != 0;
 543   bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
 544   bool on_heap = (decorators & IN_HEAP) != 0;
 545   bool on_weak_ref = (decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF)) != 0;
 546   bool is_unordered = (decorators & MO_UNORDERED) != 0;
 547   bool need_cpu_mem_bar = !is_unordered || mismatched || !on_heap;
 548   bool is_traversal_mode = ShenandoahHeap::heap()->is_traversal_mode();
 549   bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0 || is_traversal_mode;
 550   bool in_native = (decorators & IN_NATIVE) != 0;
 551 
 552   Node* top = Compile::current()->top();
 553 
 554   Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
 555   Node* load = BarrierSetC2::load_at_resolved(access, val_type);
 556 
 557   if (access.is_oop()) {
 558     if (ShenandoahLoadRefBarrier) {
 559       load = new ShenandoahLoadReferenceBarrierNode(NULL, load, in_native && !is_traversal_mode);
 560       if (access.is_parse_access()) {
 561         load = static_cast<C2ParseAccess &>(access).kit()->gvn().transform(load);
 562       } else {
 563         load = static_cast<C2OptAccess &>(access).gvn().transform(load);
 564       }
 565     }
 566   }
 567 
 568   // If we are reading the value of the referent field of a Reference
 569   // object (either by using Unsafe directly or through reflection)
 570   // then, if SATB is enabled, we need to record the referent in an
 571   // SATB log buffer using the pre-barrier mechanism.
 572   // Also we need to add memory barrier to prevent commoning reads
 573   // from this field across safepoint since GC can change its value.
 574   bool need_read_barrier = ShenandoahKeepAliveBarrier &&
 575     (on_weak_ref || (unknown && offset != top && obj != top));
 576 
 577   if (!access.is_oop() || !need_read_barrier) {
 578     return load;
 579   }
 580 
 581   assert(access.is_parse_access(), "entry not supported at optimization time");
 582   C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
 583   GraphKit* kit = parse_access.kit();
 584 
 585   if (on_weak_ref && keep_alive) {
 586     // Use the pre-barrier to record the value in the referent field
 587     satb_write_barrier_pre(kit, false /* do_load */,
 588                            NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
 589                            load /* pre_val */, T_OBJECT);
 590     // Add memory barrier to prevent commoning reads from this field
 591     // across safepoint since GC can change its value.
 592     kit->insert_mem_bar(Op_MemBarCPUOrder);
 593   } else if (unknown) {
 594     // We do not require a mem bar inside pre_barrier if need_mem_bar
 595     // is set: the barriers would be emitted by us.
 596     insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
 597   }
 598 
 599   return load;
 600 }
 601 
 602 Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
 603                                                    Node* new_val, const Type* value_type) const {
 604   GraphKit* kit = access.kit();
 605   if (access.is_oop()) {

New:

 518     val.set_node(value);
 519     shenandoah_write_barrier_pre(kit, true /* do_load */, /*kit->control(),*/ access.base(), adr, adr_idx, val.node(),
 520                                  static_cast<const TypeOopPtr*>(val.type()), NULL /* pre_val */, access.type());
 521   } else {
 522     assert(access.is_opt_access(), "only for optimization passes");
 523     assert(((decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0 || !ShenandoahSATBBarrier) && (decorators & C2_ARRAY_COPY) != 0, "unexpected caller of this code");
 524     C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
 525     PhaseGVN& gvn =  opt_access.gvn();
 526     MergeMemNode* mm = opt_access.mem();
 527 
 528     if (ShenandoahStoreValEnqueueBarrier) {
 529       Node* enqueue = gvn.transform(new ShenandoahEnqueueBarrierNode(val.node()));
 530       val.set_node(enqueue);
 531     }
 532   }
 533   return BarrierSetC2::store_at_resolved(access, val);
 534 }
 535 
 536 Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
 537   DecoratorSet decorators = access.decorators();
 538   BasicType type = access.type();
 539 
 540   assert((decorators & AS_RAW) == 0, "Unexpected decorator");
 541   assert((decorators & AS_NO_KEEPALIVE) == 0, "Unexpected decorator");
 542 
 543   Node* adr = access.addr().node();
 544   Node* obj = access.base();
 545 
 546   bool mismatched = (decorators & C2_MISMATCHED) != 0;
 547   bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
 548   bool on_heap = (decorators & IN_HEAP) != 0;
 549   bool on_weak_ref = (decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF)) != 0;
 550   bool is_unordered = (decorators & MO_UNORDERED) != 0;
 551   bool need_cpu_mem_bar = !is_unordered || mismatched || !on_heap;
 552 
 553   Node* top = Compile::current()->top();
 554 
 555   Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
 556   Node* load = BarrierSetC2::load_at_resolved(access, val_type);
 557   if (!ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
 558     return load;
 559   }
 560 
 561   load = new ShenandoahLoadReferenceBarrierNode(NULL, load, ShenandoahBarrierSet::use_native_load_reference_barrier(decorators, type));
 562   if (access.is_parse_access()) {
 563     load = static_cast<C2ParseAccess &>(access).kit()->gvn().transform(load);
 564   } else {
 565     load = static_cast<C2OptAccess &>(access).gvn().transform(load);
 566   }
 567 
 568   if (!ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
 569     return load;
 570   }
 571   // If we are reading the value of the referent field of a Reference
 572   // object (either by using Unsafe directly or through reflection)
 573   // then, if SATB is enabled, we need to record the referent in an
 574   // SATB log buffer using the pre-barrier mechanism.
 575   // Also we need to add memory barrier to prevent commoning reads
 576   // from this field across safepoint since GC can change its value.
 577   if (unknown && (offset == top || obj == top)) {
 578     return load;
 579   }
 580 
 581   assert(access.is_parse_access(), "entry not supported at optimization time");
 582   C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
 583   GraphKit* kit = parse_access.kit();
 584 
 585   if (on_weak_ref) {
 586     // Use the pre-barrier to record the value in the referent field
 587     satb_write_barrier_pre(kit, false /* do_load */,
 588                            NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
 589                            load /* pre_val */, T_OBJECT);
 590     // Add memory barrier to prevent commoning reads from this field
 591     // across safepoint since GC can change its value.
 592     kit->insert_mem_bar(Op_MemBarCPUOrder);
 593   } else if (unknown) {
 594     // We do not require a mem bar inside pre_barrier if need_mem_bar
 595     // is set: the barriers would be emitted by us.
 596     insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
 597   }
 598 
 599   return load;
 600 }
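
To make the keep-alive path in load_at_resolved concrete: what satb_write_barrier_pre ultimately arranges is that the just-loaded referent is recorded for concurrent marking. Here is a conceptual runtime-side sketch, assuming a hypothetical satb_enqueue helper; the real mechanism is generated C2 IR plus the SATB mark-queue runtime, not a plain function in this file.

  // Conceptual equivalent of the emitted keep-alive pre-barrier. 'satb_enqueue'
  // is a hypothetical stand-in for the real SATB mark-queue insertion.
  extern void satb_enqueue(oop referent); // hypothetical

  inline void keep_alive_after_referent_load(oop referent, bool marking_active) {
    // Only during concurrent marking must the referent be recorded, and a
    // NULL referent never needs keeping alive.
    if (marking_active && referent != NULL) {
      satb_enqueue(referent); // treat the loaded referent as live for SATB
    }
  }

The MemBarCPUOrder emitted right after serves a different purpose: it keeps the compiler from commoning this load with one on the other side of a safepoint, where the GC may have changed the field's value.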
 601 
 602 Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
 603                                                    Node* new_val, const Type* value_type) const {
 604   GraphKit* kit = access.kit();
 605   if (access.is_oop()) {
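
One payoff of centralizing these decisions is that they can be exercised in one place, independent of C2. The snippet below is illustrative only: the decorator constants, BasicType values and assert macro are real HotSpot idioms, but the checking function is hypothetical and assumes the sketched predicate bodies above, with ShenandoahLoadRefBarrier and ShenandoahKeepAliveBarrier enabled.

  // Hypothetical sanity check (not part of the patch): the early returns in the
  // new load_at_resolved should agree with the centralized predicates.
  static void verify_load_barrier_decisions() {
    // A plain on-heap oop load needs the LRB but no keep-alive barrier.
    DecoratorSet plain = IN_HEAP | MO_UNORDERED;
    assert(ShenandoahBarrierSet::need_load_reference_barrier(plain, T_OBJECT), "LRB expected");
    assert(!ShenandoahBarrierSet::need_keep_alive_barrier(plain, T_OBJECT), "no keep-alive expected");

    // A weak-reference load (e.g. the Reference.get intrinsic) also needs keep-alive.
    DecoratorSet weak = IN_HEAP | ON_WEAK_OOP_REF | MO_UNORDERED;
    assert(ShenandoahBarrierSet::need_keep_alive_barrier(weak, T_OBJECT), "keep-alive expected");

    // Primitive loads need no reference barriers at all.
    assert(!ShenandoahBarrierSet::need_load_reference_barrier(plain, T_INT), "no LRB for primitives");
  }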