1545 set_memory(st, adr_idx);
1546 // Back-to-back stores can only remove intermediate store with DU info
1547 // so push on worklist for optimizer.
1548 if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1549 record_for_igvn(st);
1550
1551 return st;
1552 }
1553
1554
1555 void GraphKit::pre_barrier(bool do_load,
1556 Node* ctl,
1557 Node* obj,
1558 Node* adr,
1559 uint adr_idx,
1560 Node* val,
1561 const TypeOopPtr* val_type,
1562 Node* pre_val,
1563 BasicType bt) {
1564
1565 BarrierSet* bs = Universe::heap()->barrier_set();
1566 set_control(ctl);
1567 switch (bs->kind()) {
1568 case BarrierSet::G1BarrierSet:
1569 g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
1570 break;
1571
1572 case BarrierSet::CardTableBarrierSet:
1573 break;
1574
1575 default :
1576 ShouldNotReachHere();
1577
1578 }
1579 }
1580
1581 bool GraphKit::can_move_pre_barrier() const {
1582 BarrierSet* bs = Universe::heap()->barrier_set();
1583 switch (bs->kind()) {
1584 case BarrierSet::G1BarrierSet:
1585 return true; // Can move it if no safepoint
1586
1587 case BarrierSet::CardTableBarrierSet:
1588 return true; // There is no pre-barrier
1589
1590 default :
1591 ShouldNotReachHere();
1592 }
1593 return false;
1594 }
1595
1596 void GraphKit::post_barrier(Node* ctl,
1597 Node* store,
1598 Node* obj,
1599 Node* adr,
1600 uint adr_idx,
1601 Node* val,
1602 BasicType bt,
1603 bool use_precise) {
1604 BarrierSet* bs = Universe::heap()->barrier_set();
1605 set_control(ctl);
1606 switch (bs->kind()) {
1607 case BarrierSet::G1BarrierSet:
1608 g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
1609 break;
1610
1611 case BarrierSet::CardTableBarrierSet:
1612 write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
1613 break;
1614
1615 default :
1616 ShouldNotReachHere();
1617
1618 }
1619 }
1620
1621 Node* GraphKit::store_oop(Node* ctl,
1622 Node* obj,
1623 Node* adr,
1624 const TypePtr* adr_type,
3797 inc_sp(nargs);
3798 uncommon_trap(reason, Deoptimization::Action_maybe_recompile);
3799 }
3800 Node* iftrue = _gvn.transform(new IfTrueNode(iff));
3801 set_control(iftrue);
3802 }
3803
//------------------------------add_predicate---------------------------------
// Insert the loop predicates used by loop optimizations.  nargs is the
// number of stack slots to restore (via inc_sp) before an uncommon trap
// in add_predicate_impl.
void GraphKit::add_predicate(int nargs) {
  // The regular loop predicate is emitted only when loop predication is on.
  if (UseLoopPredicate) {
    add_predicate_impl(Deoptimization::Reason_predicate, nargs);
  }
  // loop's limit check predicate should be near the loop.
  add_predicate_impl(Deoptimization::Reason_loop_limit_check, nargs);
}
3812
3813 //----------------------------- store barriers ----------------------------
3814 #define __ ideal.
3815
3816 bool GraphKit::use_ReduceInitialCardMarks() {
3817 BarrierSet *bs = Universe::heap()->barrier_set();
3818 return bs->is_a(BarrierSet::CardTableBarrierSet)
3819 && barrier_set_cast<CardTableBarrierSet>(bs)->can_elide_tlab_store_barriers()
3820 && ReduceInitialCardMarks;
3821 }
3822
3823 void GraphKit::sync_kit(IdealKit& ideal) {
3824 set_all_memory(__ merged_memory());
3825 set_i_o(__ i_o());
3826 set_control(__ ctrl());
3827 }
3828
// Copy the IdealKit's final state (memory, i_o, control — see sync_kit)
// back into this GraphKit once IdealKit construction is complete.
void GraphKit::final_sync(IdealKit& ideal) {
  // Final sync IdealKit and graphKit.
  sync_kit(ideal);
}
3833
3834 Node* GraphKit::byte_map_base_node() {
3835 // Get base of card map
3836 jbyte* card_table_base = ci_card_table_address();
3837 if (card_table_base != NULL) {
3868 // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
3869 // That routine informs GC to take appropriate compensating steps,
3870 // upon a slow-path allocation, so as to make this card-mark
3871 // elision safe.
3872 return;
3873 }
3874
3875 if (!use_precise) {
3876 // All card marks for a (non-array) instance are in one place:
3877 adr = obj;
3878 }
3879 // (Else it's an array (or unknown), and we want more precise card marks.)
3880 assert(adr != NULL, "");
3881
3882 IdealKit ideal(this, true);
3883
3884 // Convert the pointer to an int prior to doing math on it
3885 Node* cast = __ CastPX(__ ctrl(), adr);
3886
3887 // Divide by card size
3888 assert(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableBarrierSet),
3889 "Only one we handle so far.");
3890 Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift) );
3891
3892 // Combine card table base and card offset
3893 Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset );
3894
3895 // Get the alias_index for raw card-mark memory
3896 int adr_type = Compile::AliasIdxRaw;
3897 Node* zero = __ ConI(0); // Dirty card value
3898 BasicType bt = T_BYTE;
3899
3900 if (UseConcMarkSweepGC && UseCondCardMark) {
3901 insert_mem_bar(Op_MemBarVolatile); // StoreLoad barrier
3902 __ sync_kit(this);
3903 }
3904
3905 if (UseCondCardMark) {
3906 // The classic GC reference write barrier is typically implemented
3907 // as a store into the global card mark table. Unfortunately
3908 // unconditional stores can result in false sharing and excessive
|
1545 set_memory(st, adr_idx);
1546 // Back-to-back stores can only remove intermediate store with DU info
1547 // so push on worklist for optimizer.
1548 if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1549 record_for_igvn(st);
1550
1551 return st;
1552 }
1553
1554
1555 void GraphKit::pre_barrier(bool do_load,
1556 Node* ctl,
1557 Node* obj,
1558 Node* adr,
1559 uint adr_idx,
1560 Node* val,
1561 const TypeOopPtr* val_type,
1562 Node* pre_val,
1563 BasicType bt) {
1564
1565 BarrierSet* bs = BarrierSet::barrier_set();
1566 set_control(ctl);
1567 switch (bs->kind()) {
1568 case BarrierSet::G1BarrierSet:
1569 g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
1570 break;
1571
1572 case BarrierSet::CardTableBarrierSet:
1573 break;
1574
1575 default :
1576 ShouldNotReachHere();
1577
1578 }
1579 }
1580
1581 bool GraphKit::can_move_pre_barrier() const {
1582 BarrierSet* bs = BarrierSet::barrier_set();
1583 switch (bs->kind()) {
1584 case BarrierSet::G1BarrierSet:
1585 return true; // Can move it if no safepoint
1586
1587 case BarrierSet::CardTableBarrierSet:
1588 return true; // There is no pre-barrier
1589
1590 default :
1591 ShouldNotReachHere();
1592 }
1593 return false;
1594 }
1595
1596 void GraphKit::post_barrier(Node* ctl,
1597 Node* store,
1598 Node* obj,
1599 Node* adr,
1600 uint adr_idx,
1601 Node* val,
1602 BasicType bt,
1603 bool use_precise) {
1604 BarrierSet* bs = BarrierSet::barrier_set();
1605 set_control(ctl);
1606 switch (bs->kind()) {
1607 case BarrierSet::G1BarrierSet:
1608 g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
1609 break;
1610
1611 case BarrierSet::CardTableBarrierSet:
1612 write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
1613 break;
1614
1615 default :
1616 ShouldNotReachHere();
1617
1618 }
1619 }
1620
1621 Node* GraphKit::store_oop(Node* ctl,
1622 Node* obj,
1623 Node* adr,
1624 const TypePtr* adr_type,
3797 inc_sp(nargs);
3798 uncommon_trap(reason, Deoptimization::Action_maybe_recompile);
3799 }
3800 Node* iftrue = _gvn.transform(new IfTrueNode(iff));
3801 set_control(iftrue);
3802 }
3803
//------------------------------add_predicate---------------------------------
// Insert the loop predicates used by loop optimizations.  nargs is the
// number of stack slots to restore (via inc_sp) before an uncommon trap
// in add_predicate_impl.
void GraphKit::add_predicate(int nargs) {
  // The regular loop predicate is emitted only when loop predication is on.
  if (UseLoopPredicate) {
    add_predicate_impl(Deoptimization::Reason_predicate, nargs);
  }
  // loop's limit check predicate should be near the loop.
  add_predicate_impl(Deoptimization::Reason_loop_limit_check, nargs);
}
3812
3813 //----------------------------- store barriers ----------------------------
3814 #define __ ideal.
3815
3816 bool GraphKit::use_ReduceInitialCardMarks() {
3817 BarrierSet *bs = BarrierSet::barrier_set();
3818 return bs->is_a(BarrierSet::CardTableBarrierSet)
3819 && barrier_set_cast<CardTableBarrierSet>(bs)->can_elide_tlab_store_barriers()
3820 && ReduceInitialCardMarks;
3821 }
3822
3823 void GraphKit::sync_kit(IdealKit& ideal) {
3824 set_all_memory(__ merged_memory());
3825 set_i_o(__ i_o());
3826 set_control(__ ctrl());
3827 }
3828
// Copy the IdealKit's final state (memory, i_o, control — see sync_kit)
// back into this GraphKit once IdealKit construction is complete.
void GraphKit::final_sync(IdealKit& ideal) {
  // Final sync IdealKit and graphKit.
  sync_kit(ideal);
}
3833
3834 Node* GraphKit::byte_map_base_node() {
3835 // Get base of card map
3836 jbyte* card_table_base = ci_card_table_address();
3837 if (card_table_base != NULL) {
3868 // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
3869 // That routine informs GC to take appropriate compensating steps,
3870 // upon a slow-path allocation, so as to make this card-mark
3871 // elision safe.
3872 return;
3873 }
3874
3875 if (!use_precise) {
3876 // All card marks for a (non-array) instance are in one place:
3877 adr = obj;
3878 }
3879 // (Else it's an array (or unknown), and we want more precise card marks.)
3880 assert(adr != NULL, "");
3881
3882 IdealKit ideal(this, true);
3883
3884 // Convert the pointer to an int prior to doing math on it
3885 Node* cast = __ CastPX(__ ctrl(), adr);
3886
3887 // Divide by card size
3888 assert(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet),
3889 "Only one we handle so far.");
3890 Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift) );
3891
3892 // Combine card table base and card offset
3893 Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset );
3894
3895 // Get the alias_index for raw card-mark memory
3896 int adr_type = Compile::AliasIdxRaw;
3897 Node* zero = __ ConI(0); // Dirty card value
3898 BasicType bt = T_BYTE;
3899
3900 if (UseConcMarkSweepGC && UseCondCardMark) {
3901 insert_mem_bar(Op_MemBarVolatile); // StoreLoad barrier
3902 __ sync_kit(this);
3903 }
3904
3905 if (UseCondCardMark) {
3906 // The classic GC reference write barrier is typically implemented
3907 // as a store into the global card mark table. Unfortunately
3908 // unconditional stores can result in false sharing and excessive
|