  set_memory(st, adr_idx);
  // Back-to-back stores can only remove intermediate store with DU info
  // so push on worklist for optimizer.
  if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
    record_for_igvn(st);

  return st;
}


void GraphKit::pre_barrier(bool do_load,
                           Node* ctl,
                           Node* obj,
                           Node* adr,
                           uint adr_idx,
                           Node* val,
                           const TypeOopPtr* val_type,
                           Node* pre_val,
                           BasicType bt) {

  BarrierSet* bs = Universe::heap()->barrier_set();
  set_control(ctl);
  switch (bs->kind()) {
    case BarrierSet::G1SATBCTLogging:
      g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
      break;

    case BarrierSet::CardTableForRS:
    case BarrierSet::CardTableExtension:
    case BarrierSet::ModRef:
      break;

    default:
      ShouldNotReachHere();
  }
}
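// A hedged aside (not in the original source): the G1 pre-barrier emitted
// above implements, roughly, the following SATB logic -- see
// g1_write_barrier_pre() for the actual node construction:
//
//   if (marking_is_active) {
//     pre_val = do_load ? *adr : pre_val;  // previous value in the slot
//     if (pre_val != NULL)
//       enqueue(pre_val);                  // log it on the SATB queue
//   }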

bool GraphKit::can_move_pre_barrier() const {
  BarrierSet* bs = Universe::heap()->barrier_set();
  switch (bs->kind()) {
    case BarrierSet::G1SATBCTLogging:
      return true; // Can move it if no safepoint

    case BarrierSet::CardTableForRS:
    case BarrierSet::CardTableExtension:
    case BarrierSet::ModRef:
      return true; // There is no pre-barrier

    default:
      ShouldNotReachHere();
  }
  return false;
}
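// A hedged usage note: callers (for example the unsafe-access intrinsics)
// consult this to decide whether the pre-barrier may be emitted away from
// the access itself. For G1 that is only sound while no safepoint can
// intervene; the card-table collectors have no pre-barrier to move at all.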

void GraphKit::post_barrier(Node* ctl,
                            Node* store,
                            Node* obj,
                            Node* adr,
                            uint adr_idx,
                            Node* val,
                            BasicType bt,
                            bool use_precise) {
  BarrierSet* bs = Universe::heap()->barrier_set();
  set_control(ctl);
  switch (bs->kind()) {
    case BarrierSet::G1SATBCTLogging:
      g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
      break;

    case BarrierSet::CardTableForRS:
    case BarrierSet::CardTableExtension:
      write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
      break;

    case BarrierSet::ModRef:
      break;

    default:
      ShouldNotReachHere();
  }
}
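// Typical call pattern, as a hedged sketch (the canonical sequence lives in
// GraphKit::store_oop()): the pre-barrier brackets the oop store from the
// front, the post-barrier from the back:
//
//   pre_barrier(true /* do_load */, control(), obj, adr, adr_idx, val,
//               val_type, NULL /* pre_val */, bt);
//   Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo);
//   post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);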

// ... (intervening sections elided) ...

//------------------------------add_predicate---------------------------------
void GraphKit::add_predicate(int nargs) {
  if (UseLoopPredicate) {
    add_predicate_impl(Deoptimization::Reason_predicate, nargs);
  }
  // loop's limit check predicate should be near the loop.
  add_predicate_impl(Deoptimization::Reason_loop_limit_check, nargs);
}

//----------------------------- store barriers ----------------------------
#define __ ideal.

void GraphKit::sync_kit(IdealKit& ideal) {
  set_all_memory(__ merged_memory());
  set_i_o(__ i_o());
  set_control(__ ctrl());
}

void GraphKit::final_sync(IdealKit& ideal) {
  // Final sync IdealKit and graphKit.
  sync_kit(ideal);
}
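// Usage sketch (hedged; write_barrier_post() below is a real client): an
// IdealKit is seeded from the current GraphKit state, nodes are emitted
// through the `__ ideal.` shorthand, and final_sync() folds the IdealKit's
// control, i/o and memory state back into the GraphKit:
//
//   IdealKit ideal(this, true /* delay_all_transforms */);
//   // ... emit nodes via __ ...
//   final_sync(ideal);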

Node* GraphKit::byte_map_base_node() {
  // Get base of card map
  CardTableModRefBS* ct =
    barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust users of this code");
  if (ct->byte_map_base != NULL) {
    return makecon(TypeRawPtr::make((address)ct->byte_map_base));
  } else {
    return null();
  }
}
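// Worked example (illustrative; assumes the usual 512-byte cards, so
// CardTableModRefBS::card_shift == 9): every address in [0x1000, 0x11ff]
// shifts down to card index 0x1000 >> 9 == 8, so an oop store anywhere in
// that 512-byte window dirties the single byte at byte_map_base + 8.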

// vanilla/CMS post barrier
// Insert a write-barrier store.  This is to let generational GC work; we have
// to flag all oop-stores before the next GC point.
void GraphKit::write_barrier_post(Node* oop_store,
                                  Node* obj,
                                  Node* adr,
                                  uint adr_idx,
                                  Node* val,
                                  bool use_precise) {
  // No store check needed if we're storing a NULL or an old object
  // (latter case is probably a string constant). The concurrent
  // mark sweep garbage collector, however, needs to have all nonNull
  // oop updates flagged via card-marks.
  if (val != NULL && val->is_Con()) {
    // must be either an oop or NULL
    const Type* t = val->bottom_type();
    if (t == TypePtr::NULL_PTR || t == Type::TOP)
      // stores of null never (?) need barriers
      return;
  }

  if (use_ReduceInitialCardMarks()
      && obj == just_allocated_object(control())) {
    // We can skip marks on a freshly-allocated object in Eden.
    // Keep this code in sync with new_store_pre_barrier() in runtime.cpp.
    // That routine informs GC to take appropriate compensating steps,
    // upon a slow-path allocation, so as to make this card-mark
    // elision safe.
    return;
  }

  if (!use_precise) {
    // All card marks for a (non-array) instance are in one place:
    adr = obj;
  }
  // (Else it's an array (or unknown), and we want more precise card marks.)
  assert(adr != NULL, "");

  IdealKit ideal(this, true);

  // Convert the pointer to an int prior to doing math on it
  Node* cast = __ CastPX(__ ctrl(), adr);

  // Divide by card size
  assert(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef),
         "Only one we handle so far.");
  Node* card_offset = __ URShiftX(cast, __ ConI(CardTableModRefBS::card_shift));

  // Combine card table base and card offset
  Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset);

  // Get the alias_index for raw card-mark memory
  int adr_type = Compile::AliasIdxRaw;
  Node* zero = __ ConI(0); // Dirty card value
  BasicType bt = T_BYTE;

  if (UseConcMarkSweepGC && UseCondCardMark) {
    insert_mem_bar(Op_MemBarVolatile); // StoreLoad barrier
    __ sync_kit(this);
  }
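  // A hedged aside (not in the original source): the MemBarVolatile above
  // acts as a StoreLoad fence so that, under CMS with conditional card
  // marking, the preceding oop store becomes globally visible before the
  // card byte is loaded and tested below; otherwise the check could observe
  // a stale dirty value and skip a card mark the collector still needs.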

  if (UseCondCardMark) {
    // The classic GC reference write barrier is typically implemented
    // as a store into the global card mark table.  Unfortunately
    // unconditional stores can result in false sharing and excessive
    // coherence traffic as well as false transactional aborts.