    st = StoreDNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
  } else {
    st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
  }
  if (unaligned) {
    st->as_Store()->set_unaligned_access();
  }
  if (mismatched) {
    st->as_Store()->set_mismatched_access();
  }
  st = _gvn.transform(st);
  set_memory(st, adr_idx);
  // Back-to-back stores can only remove an intermediate store with DU info,
  // so push the store on the worklist for the optimizer.
  if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
    record_for_igvn(st);

  return st;
}
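
// Illustrative usage (a sketch, not code from this function): callers such
// as store_oop below first compute the address and its alias index, then
// emit the store on the matching memory slice:
//
//   uint adr_idx = C->get_alias_index(adr_type);
//   Node* st = store_to_memory(control(), adr, val, bt, adr_idx, mo, mismatched);
//
// store_to_memory then wires the new store into that slice via
// set_memory(st, adr_idx), so subsequent loads on the slice observe it.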


void GraphKit::pre_barrier(bool do_load,
                           Node* ctl,
                           Node* obj,
                           Node* adr,
                           uint  adr_idx,
                           Node* val,
                           const TypeOopPtr* val_type,
                           Node* pre_val,
                           BasicType bt) {

  BarrierSet* bs = BarrierSet::barrier_set();
  set_control(ctl);
  switch (bs->kind()) {

#if INCLUDE_G1GC
    case BarrierSet::G1BarrierSet:
      g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
      break;
#endif

    case BarrierSet::CardTableBarrierSet:
      break;

    default:
      ShouldNotReachHere();

  }
}
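
// Note on the dispatch above: only SATB collectors need a pre-barrier.
// G1 must log the value about to be overwritten (see g1_write_barrier_pre
// below), while a plain card-table barrier set has no pre-barrier work to
// do, hence the empty CardTableBarrierSet case.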

bool GraphKit::can_move_pre_barrier() const {
  BarrierSet* bs = BarrierSet::barrier_set();
  switch (bs->kind()) {

#if INCLUDE_G1GC
    case BarrierSet::G1BarrierSet:
      return true; // Can move it if no safepoint
#endif

    case BarrierSet::CardTableBarrierSet:
      return true; // There is no pre-barrier

    default:
      ShouldNotReachHere();
  }
  return false;
}

void GraphKit::post_barrier(Node* ctl,
                            Node* store,
                            Node* obj,
                            Node* adr,
                            uint  adr_idx,
                            Node* val,
                            BasicType bt,
                            bool use_precise) {
  BarrierSet* bs = BarrierSet::barrier_set();
  set_control(ctl);
  switch (bs->kind()) {
#if INCLUDE_G1GC
    case BarrierSet::G1BarrierSet:
      g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
      break;
#endif

    case BarrierSet::CardTableBarrierSet:
      write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
      break;

    default:
      ShouldNotReachHere();

  }
}

Node* GraphKit::store_oop(Node* ctl,
                          Node* obj,
                          Node* adr,
                          const TypePtr* adr_type,
                          Node* val,
                          const TypeOopPtr* val_type,
                          BasicType bt,
                          bool use_precise,
                          MemNode::MemOrd mo,
                          bool mismatched) {
  // Transformation of a value which could be a NULL pointer (CastPP #NULL)
  // could be delayed during Parse (for example, in adjust_map_after_if()).
  // Execute the transformation here to avoid barrier generation in such a case.
  if (_gvn.type(val) == TypePtr::NULL_PTR)
    val = _gvn.makecon(TypePtr::NULL_PTR);

  set_control(ctl);
  if (stopped()) return top(); // Dead path ?

  assert(bt == T_OBJECT, "sanity");
  assert(val != NULL, "not dead path");
  uint adr_idx = C->get_alias_index(adr_type);
  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");

  pre_barrier(true /* do_load */,
              control(), obj, adr, adr_idx, val, val_type,
              NULL /* pre_val */,
              bt);

  Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo, mismatched);
  post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
  return store;
}
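
// The sequence above is the canonical oop-store shape: pre-barrier (logs the
// old value for SATB collectors), the store itself, then the post-barrier
// (card mark / remembered-set update). An illustrative field store through
// this helper, where field_offset, adr_type and val_type are placeholders
// computed by the caller:
//
//   Node* adr = basic_plus_adr(obj, obj, field_offset);
//   store_oop(control(), obj, adr, adr_type, val, val_type, T_OBJECT,
//             false /* use_precise */, MemNode::unordered, false);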

// Could be an array or object we don't know at compile time (unsafe ref.)
Node* GraphKit::store_oop_to_unknown(Node* ctl,
                                     Node* obj,   // containing obj
                                     Node* adr,   // actual address to store val at
                                     const TypePtr* adr_type,
                                     Node* val,
                                     BasicType bt,
                                     MemNode::MemOrd mo,
                                     bool mismatched) {
  Compile::AliasType* at = C->alias_type(adr_type);
  const TypeOopPtr* val_type = NULL;
  if (adr_type->isa_instptr()) {
    if (at->field() != NULL) {
      // Known field. This code is a copy of the do_put_xxx logic.
      ciField* field = at->field();
      if (!field->type()->is_loaded()) {
        val_type = TypeInstPtr::BOTTOM;
      } else {
        val_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
      }
    }
  } else if (adr_type->isa_aryptr()) {
    val_type = adr_type->is_aryptr()->elem()->make_oopptr();
  }
  if (val_type == NULL) {
    val_type = TypeInstPtr::BOTTOM;
  }
  return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo, mismatched);
}

Node* GraphKit::access_store_at(Node* ctl,
                                Node* obj,
                                Node* adr,
                                const TypePtr* adr_type,
                                Node* val,
                                const Type* val_type,
                                BasicType bt,
                                DecoratorSet decorators) {
  // Transformation of a value which could be a NULL pointer (CastPP #NULL)
  // could be delayed during Parse (for example, in adjust_map_after_if()).
  // Execute the transformation here to avoid barrier generation in such a case.
  if (_gvn.type(val) == TypePtr::NULL_PTR) {
    val = _gvn.makecon(TypePtr::NULL_PTR);
  }

  set_control(ctl);
  if (stopped()) {
    return top(); // Dead path ?
  }

  assert(val != NULL, "not dead path");

  C2AccessValuePtr addr(adr, adr_type);
  C2AccessValue value(val, val_type);
  C2Access access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::store_at(access, value);
  } else {
    return _barrier_set->store_at(access, value);
  }
}
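
// Illustrative use of the access API (a sketch of a String.value field
// store; value_offset, value_field_type and value are placeholders): an
// ordinary in-heap oop store passes the IN_HEAP decorator and lets the
// barrier set expand whatever GC barriers are required:
//
//   access_store_at(ctrl, str, basic_plus_adr(str, value_offset),
//                   value_field_type, value, TypeAryPtr::BYTES, T_OBJECT,
//                   IN_HEAP);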

Node* GraphKit::access_load_at(Node* obj,   // containing obj
                               Node* adr,   // actual address to load from
                               const TypePtr* adr_type,
                               const Type* val_type,
                               BasicType bt,
                               DecoratorSet decorators) {
  if (stopped()) {
    return top(); // Dead path ?
  }

  C2AccessValuePtr addr(adr, adr_type);
  C2Access access(this, decorators | C2_READ_ACCESS, bt, obj, addr);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::load_at(access, val_type);
  } else {
    return _barrier_set->load_at(access, val_type);
  }
}
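
// Illustrative counterpart for loads (same caveat as above; p is a
// placeholder address node): decorators combine, e.g. an in-heap oop load
// that must not float above its control input:
//
//   Node* load = access_load_at(str, p, value_field_type, value_type,
//                               T_OBJECT, IN_HEAP | C2_CONTROL_DEPENDENT_LOAD);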

Node* GraphKit::access_atomic_cmpxchg_val_at(Node* ctl,
                                             Node* obj,
                                             Node* adr,
                                             const TypePtr* adr_type,
                                             int alias_idx,
                                             Node* expected_val,
                                             Node* new_val,
                                             const Type* value_type,
                                             BasicType bt,
                                             DecoratorSet decorators) {
  set_control(ctl);
  C2AccessValuePtr addr(adr, adr_type);
  C2AtomicAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS,
                        bt, obj, addr, alias_idx);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::atomic_cmpxchg_val_at(access, expected_val, new_val, value_type);
  } else {
    return _barrier_set->atomic_cmpxchg_val_at(access, expected_val, new_val, value_type);
  }
}
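
// The _val_ variant above returns the value found at the address (the old
// value), which is what a compare-and-exchange style intrinsic consumes;
// the _bool_ variant below returns only the success flag, matching
// compare-and-set style intrinsics.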

Node* GraphKit::access_atomic_cmpxchg_bool_at(Node* ctl,
                                              Node* obj,
                                              Node* adr,
                                              const TypePtr* adr_type,
                                              int alias_idx,
                                              Node* expected_val,
                                              Node* new_val,
                                              const Type* value_type,
                                              BasicType bt,
                                              DecoratorSet decorators) {
  set_control(ctl);
  C2AccessValuePtr addr(adr, adr_type);
  C2AtomicAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS,
                        bt, obj, addr, alias_idx);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::atomic_cmpxchg_bool_at(access, expected_val, new_val, value_type);
  } else {
    return _barrier_set->atomic_cmpxchg_bool_at(access, expected_val, new_val, value_type);
  }
}

Node* GraphKit::access_atomic_xchg_at(Node* ctl,
                                      Node* obj,
                                      Node* adr,
                                      const TypePtr* adr_type,
                                      int alias_idx,
                                      Node* new_val,
                                      const Type* value_type,
                                      BasicType bt,
                                      DecoratorSet decorators) {
  set_control(ctl);
  C2AccessValuePtr addr(adr, adr_type);
  C2AtomicAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS,
                        bt, obj, addr, alias_idx);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::atomic_xchg_at(access, new_val, value_type);
  } else {
    return _barrier_set->atomic_xchg_at(access, new_val, value_type);
  }
}

Node* GraphKit::access_atomic_add_at(Node* ctl,
                                     Node* obj,
                                     Node* adr,
                                     const TypePtr* adr_type,
                                     int alias_idx,
                                     Node* new_val,
                                     const Type* value_type,
                                     BasicType bt,
                                     DecoratorSet decorators) {
  set_control(ctl);
  C2AccessValuePtr addr(adr, adr_type);
  C2AtomicAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, bt, obj, addr, alias_idx);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::atomic_add_at(access, new_val, value_type);
  } else {
    return _barrier_set->atomic_add_at(access, new_val, value_type);
  }
}

void GraphKit::access_clone(Node* ctl, Node* src, Node* dst, Node* size, bool is_array) {
  set_control(ctl);
  return _barrier_set->clone(this, src, dst, size, is_array);
}

//-------------------------array_element_address-------------------------
Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
                                      const TypeInt* sizetype, Node* ctrl) {
  uint shift  = exact_log2(type2aelembytes(elembt));
  uint header = arrayOopDesc::base_offset_in_bytes(elembt);

  // short-circuit a common case (saves lots of confusing waste motion)
  jint idx_con = find_int_con(idx, -1);
  if (idx_con >= 0) {
    intptr_t offset = header + ((intptr_t)idx_con << shift);
    return basic_plus_adr(ary, offset);
  }

  // must be correct type for alignment purposes
  Node* base  = basic_plus_adr(ary, header);
  idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
  Node* scale = _gvn.transform(new LShiftXNode(idx, intcon(shift)));
  return basic_plus_adr(ary, base, scale);
}
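
// Worked example of the constant-index fast path above (the header value is
// an assumption; 16 bytes is typical for an int array on a 64-bit VM with
// compressed class pointers): for a T_INT array, type2aelembytes(T_INT) == 4,
// so shift == 2, and idx_con == 5 yields
//
//   offset = 16 + (5 << 2) = 36
//
// i.e. a single AddP of ary + 36, with no LShiftX/AddP chain needed.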
//------------------------------add_predicate_impl----------------------------
// Tail of add_predicate_impl: hook the predicate's failing path to an
// uncommon trap and continue on the succeeding path.
  C->add_predicate_opaq(opq);
  {
    PreserveJVMState pjvms(this);
    set_control(iffalse);
    inc_sp(nargs);
    uncommon_trap(reason, Deoptimization::Action_maybe_recompile);
  }
  Node* iftrue = _gvn.transform(new IfTrueNode(iff));
  set_control(iftrue);
}

//------------------------------add_predicate---------------------------------
void GraphKit::add_predicate(int nargs) {
  if (UseLoopPredicate) {
    add_predicate_impl(Deoptimization::Reason_predicate, nargs);
  }
  // The loop's limit check predicate should be near the loop.
  add_predicate_impl(Deoptimization::Reason_loop_limit_check, nargs);
}
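
// What add_predicate_impl emits (its tail is shown above): a placeholder
// check whose false branch goes to an uncommon trap with
// Action_maybe_recompile, while the true branch falls through toward the
// loop. Loop optimizations can later hoist checks (e.g. range checks) up to
// this point; if the speculation fails, the trap deoptimizes and the method
// is recompiled without the predicate.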

//----------------------------- store barriers ----------------------------
#define __ ideal.

bool GraphKit::use_ReduceInitialCardMarks() {
  BarrierSet* bs = BarrierSet::barrier_set();
  return bs->is_a(BarrierSet::CardTableBarrierSet)
         && barrier_set_cast<CardTableBarrierSet>(bs)->can_elide_tlab_store_barriers()
         && ReduceInitialCardMarks;
}

void GraphKit::sync_kit(IdealKit& ideal) {
  set_all_memory(__ merged_memory());
  set_i_o(__ i_o());
  set_control(__ ctrl());
}

void GraphKit::final_sync(IdealKit& ideal) {
  // Final sync IdealKit and graphKit.
  sync_kit(ideal);
}

Node* GraphKit::byte_map_base_node() {
  // Get base of card map
  jbyte* card_table_base = ci_card_table_address();
  if (card_table_base != NULL) {
    return makecon(TypeRawPtr::make((address)card_table_base));
  } else {
    return null();
  }
}

// vanilla/CMS post barrier
// Insert a write-barrier store. This is to let generational GC work; we have
// to flag all oop-stores before the next GC point.
void GraphKit::write_barrier_post(Node* oop_store,
                                  Node* obj,
                                  Node* adr,
                                  uint  adr_idx,
                                  Node* val,
                                  bool use_precise) {
  // No store check needed if we're storing a NULL or an old object
  // (the latter case is probably a string constant). The concurrent
  // mark sweep garbage collector, however, needs to have all non-NULL
  // oop updates flagged via card-marks.
  if (val != NULL && val->is_Con()) {
    // must be either an oop or NULL
    const Type* t = val->bottom_type();
    if (t == TypePtr::NULL_PTR || t == Type::TOP)
      // stores of null never (?) need barriers
      return;
  }

  if (use_ReduceInitialCardMarks()
      && obj == just_allocated_object(control())) {
    // We can skip marks on a freshly-allocated object in Eden.
    // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
    // That routine informs GC to take appropriate compensating steps,
    // upon a slow-path allocation, so as to make this card-mark
    // elision safe.
    return;
  }

  if (!use_precise) {
    // All card marks for a (non-array) instance are in one place:
    adr = obj;
  }
  // (Else it's an array (or unknown), and we want more precise card marks.)
  assert(adr != NULL, "");

  IdealKit ideal(this, true);

  // Convert the pointer to an int prior to doing math on it
  Node* cast = __ CastPX(__ ctrl(), adr);

  // Divide by card size
  assert(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet),
         "Only one we handle so far.");
  Node* card_offset = __ URShiftX(cast, __ ConI(CardTable::card_shift));

  // Combine card table base and card offset
  Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset);

  // Get the alias_index for raw card-mark memory
  int adr_type = Compile::AliasIdxRaw;
  Node* zero = __ ConI(0); // Dirty card value
  BasicType bt = T_BYTE;

  if (UseConcMarkSweepGC && UseCondCardMark) {
    insert_mem_bar(Op_MemBarVolatile); // StoreLoad barrier
    __ sync_kit(this);
  }

  if (UseCondCardMark) {
    // The classic GC reference write barrier is typically implemented
    // as a store into the global card mark table. Unfortunately
    // unconditional stores can result in false sharing and excessive
    // coherence traffic as well as false transactional aborts.
    // UseCondCardMark enables MP "polite" conditional card mark
    // stores. In theory we could relax the load from ctrl() to
    // no_ctrl, but that doesn't buy much latitude.
    Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::BYTE, bt, adr_type);
    __ if_then(card_val, BoolTest::ne, zero);
  }

  // Smash zero into card
  if (!UseConcMarkSweepGC) {
    __ store(__ ctrl(), card_adr, zero, bt, adr_type, MemNode::unordered);
  } else {
    // Specialized path for CM store barrier
    __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
  }

  if (UseCondCardMark) {
    __ end_if();
  }

  // Final sync IdealKit and GraphKit.
  final_sync(ideal);
}
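
// Card-mark arithmetic above, as a concrete example (assuming the usual
// 512-byte cards, i.e. CardTable::card_shift == 9): oop stores at addresses
// 0x1000 and 0x11ff both map to card index addr >> 9 == 8, so they dirty
// the same single byte at byte_map_base + 8, while a store at 0x1200 falls
// into card 9. The conditional load under UseCondCardMark avoids redundant
// writes to that shared card byte.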

#if INCLUDE_G1GC

/*
 * Determine if the G1 pre-barrier can be removed. The pre-barrier is
 * required by SATB to make sure all objects live at the start of the
 * marking are kept alive: every reference update needs to log the
 * previous reference value before the new value is written.
 *
 * If the previous value is NULL there is no need to save the old value.
 * References that are NULL are filtered during runtime by the barrier
 * code to avoid unnecessary queuing.
 *
 * However, in the case of newly allocated objects it might be possible to
 * prove that the reference about to be overwritten is NULL at compile
 * time and avoid adding the barrier code completely.
 *
 * The compiler needs to determine that the object in which a field is about
 * to be written is newly allocated, and that no prior store to the same field
 * has happened since the allocation.
 *
 * Returns true if the pre-barrier can be removed
 */
bool GraphKit::g1_can_remove_pre_barrier(PhaseTransform* phase, Node* adr,
                                         BasicType bt, uint adr_idx) {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
    return false; // No allocation found
  }

  intptr_t size_in_bytes = type2aelembytes(bt);

  Node* mem = memory(adr_idx); // start searching here...

  for (int cnt = 0; cnt < 50; cnt++) {

    if (mem->is_Store()) {

      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == NULL) {
        break; // inscrutable pointer
      }

      // We have found a store with the same base and offset as ours, so break
      if (st_base == base && st_offset == offset) {
        break;
      }

      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success: The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects. We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // advance through independent store memory
        }
      }

      if (st_base != base
          && MemNode::detect_ptr_independence(base, alloc, st_base,
                                              AllocateNode::Ideal_allocation(st_base, phase),
                                              phase)) {
        // Success: The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // advance through independent store memory
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();

      // Make sure that we are looking at the same allocation site.
      // The alloc variable is guaranteed to not be null here from earlier check.
      if (alloc == st_alloc) {
        // Check that the initialization is storing NULL so that no previous store
        // has been moved up to directly write a reference
        Node* captured_store = st_init->find_captured_store(offset,
                                                            type2aelembytes(T_OBJECT),
                                                            phase);
        if (captured_store == NULL || captured_store == st_init->zero_memory()) {
          return true;
        }
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return false;
}

// G1 pre/post barriers
void GraphKit::g1_write_barrier_pre(bool do_load,
                                    Node* obj,
                                    Node* adr,
                                    uint alias_idx,
                                    Node* val,
                                    const TypeOopPtr* val_type,
                                    Node* pre_val,
                                    BasicType bt) {

  // Some sanity checks
  // Note: val is unused in this routine.

  if (do_load) {
    // We need to generate the load of the previous value
    assert(obj != NULL, "must have a base");
    assert(adr != NULL, "where are we loading from?");
    assert(pre_val == NULL, "loaded already?");
    assert(val_type != NULL, "need a type");

    if (use_ReduceInitialCardMarks()
        && g1_can_remove_pre_barrier(&_gvn, adr, bt, alias_idx)) {
      return;
    }

  } else {
    // In this case both val_type and alias_idx are unused.
    assert(pre_val != NULL, "must be loaded already");
    // Nothing to be done if pre_val is null.
    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
  }
  assert(bt == T_OBJECT, "or we shouldn't be here");

  IdealKit ideal(this, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_ctrl = NULL;
  Node* no_base = __ top();
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);

  float likely   = PROB_LIKELY(0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  BasicType active_type = in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
  assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 || in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "flag width");

  // Offsets into the thread
  const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
  const int index_offset   = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
  const int buffer_offset  = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());

  // Now the actual pointers into the thread
  Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
  Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some of the values
  Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);

  // if (marking != 0), i.e. only do barrier work while marking is active
  __ if_then(marking, BoolTest::ne, zero, unlikely); {
    BasicType index_bt = TypeX_X->basic_type();
    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 SATBMarkQueue::_index with wrong size.");
    Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);

    if (do_load) {
      // load original value
      // alias_idx correct??
      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
    }

    // if (pre_val != NULL)
    __ if_then(pre_val, BoolTest::ne, null()); {
      Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

      // is there still room in the queue for this thread? (index == 0 means full)
      __ if_then(index, BoolTest::ne, zeroX, likely); {

        // decrement the index
        Node* next_index = _gvn.transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));

        // Now get the buffer location we will log the previous value into and store it
        Node* log_addr = __ AddP(no_base, buffer, next_index);
        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
        // update the index
        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);

      } __ else_(); {

        // logging buffer is full, call the runtime
        const TypeFunc* tf = OptoRuntime::g1_wb_pre_Type();
        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", pre_val, tls);
      } __ end_if(); // (index != 0)
    } __ end_if();   // (pre_val != NULL)
  } __ end_if();     // (marking != 0)

  // Final sync IdealKit and GraphKit.
  final_sync(ideal);
}
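
// In pseudocode, the SATB pre-barrier emitted above is roughly:
//
//   if (thread->satb_mark_queue_active()) {        // "marking" flag
//     pre_val = *adr;                              // only when do_load
//     if (pre_val != NULL) {
//       if (index != 0) {                          // room left in the buffer
//         index -= sizeof(intptr_t);               // index is a byte offset
//         *(buffer + index) = pre_val;             // log the previous value
//       } else {
//         g1_wb_pre(pre_val, thread);              // slow path: runtime call
//       }
//     }
//   }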

/*
 * G1, like any GC with a young generation, requires a way to keep track of
 * references from the old generation to the young generation to make sure
 * all live objects are found. G1 also needs to keep track of object
 * references between different regions to enable evacuation of old regions,
 * which is done as part of mixed collections. References are tracked in
 * remembered sets, which are continuously updated as references are written,
 * with the help of the post-barrier.
 *
 * To reduce the number of updates to the remembered set, the post-barrier
 * filters out updates to fields in objects located in the young generation
 * or in the same region as the reference, updates where NULL is being
 * written, and updates whose card is already marked as dirty by an earlier
 * write.
 *
 * Under certain circumstances it is possible to avoid generating the
 * post-barrier completely, namely if it can be proven at compile time that
 * the object is newly allocated and that no safepoint exists between the
 * allocation and the store.
 *
 * In the case of a slow-path allocation, the allocation code must handle the
 * barrier as part of the allocation when the allocated object is not located
 * in the nursery; this happens for humongous objects. This is similar to how
 * CMS is required to handle this case; see the comments for the method
 * CardTableBarrierSet::on_allocation_slowpath_exit and OptoRuntime::new_deferred_store_barrier.
 * A deferred card mark is required for these objects and handled in the
 * above-mentioned methods.
 *
 * Returns true if the post barrier can be removed
 */
bool GraphKit::g1_can_remove_post_barrier(PhaseTransform* phase, Node* store,
                                          Node* adr) {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
    return false; // No allocation found
  }

  // Start search from Store node
  Node* mem = store->in(MemNode::Control);
  if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

    InitializeNode* st_init = mem->in(0)->as_Initialize();
    AllocateNode* st_alloc = st_init->allocation();

    // Make sure we are looking at the same allocation
    if (alloc == st_alloc) {
      return true;
    }
  }

  return false;
}

//
// Update the card table and add card address to the queue
//
void GraphKit::g1_mark_card(IdealKit& ideal,
                            Node* card_adr,
                            Node* oop_store,
                            uint oop_alias_idx,
                            Node* index,
                            Node* index_adr,
                            Node* buffer,
                            const TypeFunc* tf) {

  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);
  Node* no_base = __ top();
  BasicType card_bt = T_BYTE;
  // Smash zero into card. MUST BE ORDERED WRT THE OOP STORE.
  __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);

  // Now do the queue work
  __ if_then(index, BoolTest::ne, zeroX); {

    Node* next_index = _gvn.transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));
    Node* log_addr = __ AddP(no_base, buffer, next_index);

    // Order, see storeCM.
    __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
    __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, MemNode::unordered);

  } __ else_(); {
    __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
  } __ end_if();

}

void GraphKit::g1_write_barrier_post(Node* oop_store,
                                     Node* obj,
                                     Node* adr,
                                     uint alias_idx,
                                     Node* val,
                                     BasicType bt,
                                     bool use_precise) {
  // If we are writing a NULL then we need no post barrier

  if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
    // Must be NULL
    const Type* t = val->bottom_type();
    assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
    // No post barrier if writing NULL
    return;
  }

  if (use_ReduceInitialCardMarks() && obj == just_allocated_object(control())) {
    // We can skip marks on a freshly-allocated object in Eden.
    // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
    // That routine informs GC to take appropriate compensating steps,
    // upon a slow-path allocation, so as to make this card-mark
    // elision safe.
    return;
  }

  if (use_ReduceInitialCardMarks()
      && g1_can_remove_post_barrier(&_gvn, oop_store, adr)) {
    return;
  }

  if (!use_precise) {
    // All card marks for a (non-array) instance are in one place:
    adr = obj;
  }
  // (Else it's an array (or unknown), and we want more precise card marks.)
  assert(adr != NULL, "");

  IdealKit ideal(this, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  float likely   = PROB_LIKELY(0.999);
  float unlikely = PROB_UNLIKELY(0.999);
  Node* young_card = __ ConI((jint)G1CardTable::g1_young_card_val());
  Node* dirty_card = __ ConI((jint)CardTable::dirty_card_val());
  Node* zeroX = __ ConX(0);

  // Get the alias_index for raw card-mark memory
  const TypePtr* card_type = TypeRawPtr::BOTTOM;

  const TypeFunc* tf = OptoRuntime::g1_wb_post_Type();

  // Offsets into the thread
  const int index_offset  = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
  const int buffer_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());

  // Pointers into the thread

  Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr  = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some values
  // Use ctrl to avoid hoisting these values past a safepoint, which could
  // potentially reset these fields in the JavaThread.
  Node* index  = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
  Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

  // Convert the store obj pointer to an int prior to doing math on it
  // Must use ctrl to prevent "integerized oop" existing across safepoint
  Node* cast = __ CastPX(__ ctrl(), adr);

  // Divide pointer by card size
  Node* card_offset = __ URShiftX(cast, __ ConI(CardTable::card_shift));

  // Combine card table base and card offset
  Node* card_adr = __ AddP(no_base, byte_map_base_node(), card_offset);

  // If we know the value being stored, check whether the store crosses regions

  if (val != NULL) {
    // Does the store cause us to cross regions?

    // Should be able to do an unsigned compare of region_size instead of
    // an extra shift. Do we have an unsigned compare??
    // Node* region_size = __ ConI(1 << HeapRegion::LogOfHRGrainBytes);
    Node* xor_res = __ URShiftX(__ XorX(cast, __ CastPX(__ ctrl(), val)), __ ConI(HeapRegion::LogOfHRGrainBytes));

    // if (xor_res == 0) same region so skip
    __ if_then(xor_res, BoolTest::ne, zeroX); {

      // No barrier if we are storing a NULL
      __ if_then(val, BoolTest::ne, null(), unlikely); {

        // Ok must mark the card if not already dirty

        // load the original value of the card
        Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);

        __ if_then(card_val, BoolTest::ne, young_card); {
          sync_kit(ideal);
          // Use Op_MemBarVolatile to achieve the effect of a StoreLoad barrier.
          insert_mem_bar(Op_MemBarVolatile, oop_store);
          __ sync_kit(this);

          Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
          __ if_then(card_val_reload, BoolTest::ne, dirty_card); {
            g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
          } __ end_if();
        } __ end_if();
      } __ end_if();
    } __ end_if();
  } else {
    // The Object.clone() intrinsic uses this path if !ReduceInitialCardMarks.
    // We don't need a barrier here if the destination is a newly allocated object
    // in Eden. Otherwise, GC verification breaks because we assume that cards in Eden
    // are set to 'g1_young_gen' (see G1CardTable::verify_g1_young_region()).
    assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");
    Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
    __ if_then(card_val, BoolTest::ne, young_card); {
      g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
    } __ end_if();
  }

  // Final sync IdealKit and GraphKit.
  final_sync(ideal);
}
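
// In pseudocode, the G1 post-barrier emitted above is roughly:
//
//   if (((adr ^ val) >> LogOfHRGrainBytes) != 0    // store crosses regions
//       && val != NULL) {
//     card = card_table[adr >> card_shift];
//     if (card != g1_young_card_val) {
//       StoreLoad;                                 // order against the oop store
//       if (card_table[adr >> card_shift] != dirty_card_val) {
//         card_table[adr >> card_shift] = dirty_card_val;   // via storeCM
//         enqueue the card address, or call g1_wb_post if the queue is full
//       }
//     }
//   }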
#undef __

#endif // INCLUDE_G1GC

Node* GraphKit::load_String_length(Node* ctrl, Node* str) {
  Node* len = load_array_length(load_String_value(ctrl, str));
  Node* coder = load_String_coder(ctrl, str);
  // Divide length by 2 if coder is UTF16
  return _gvn.transform(new RShiftINode(len, coder));
}
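
// Example of the shift above: with compact strings, the coder is 0 (LATIN1)
// or 1 (UTF16), so a backing byte[] of length 10 decodes to 10 chars when
// LATIN1 (10 >> 0) and to 5 chars when UTF16 (10 >> 1).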

Node* GraphKit::load_String_value(Node* ctrl, Node* str) {
  int value_offset = java_lang_String::value_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* value_field_type = string_type->add_offset(value_offset);
  const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
                                                  TypeAry::make(TypeInt::BYTE, TypeInt::POS),
                                                  ciTypeArrayKlass::make(T_BYTE), true, 0);
  int value_field_idx = C->get_alias_index(value_field_type);
  Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset),
                         value_type, T_OBJECT, value_field_idx, MemNode::unordered);
  // String.value field is known to be @Stable.
  if (UseImplicitStableValues) {
    load = cast_array_to_stable(load, value_type);
  }
  return load;
}

Node* GraphKit::load_String_coder(Node* ctrl, Node* str) {
  if (!CompactStrings) {
    return intcon(java_lang_String::CODER_UTF16);
  }
  int coder_offset = java_lang_String::coder_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
  int coder_field_idx = C->get_alias_index(coder_field_type);
  return make_load(ctrl, basic_plus_adr(str, str, coder_offset),
                   TypeInt::BYTE, T_BYTE, coder_field_idx, MemNode::unordered);
}

void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) {
  int value_offset = java_lang_String::value_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* value_field_type = string_type->add_offset(value_offset);
  store_oop_to_object(ctrl, str, basic_plus_adr(str, value_offset), value_field_type,
                      value, TypeAryPtr::BYTES, T_OBJECT, MemNode::unordered);
}

void GraphKit::store_String_coder(Node* ctrl, Node* str, Node* value) {
  int coder_offset = java_lang_String::coder_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
  int coder_field_idx = C->get_alias_index(coder_field_type);
  store_to_memory(ctrl, basic_plus_adr(str, coder_offset),
                  value, T_BYTE, coder_field_idx, MemNode::unordered);
}

// Capture src and dst memory state with a MergeMemNode
Node* GraphKit::capture_memory(const TypePtr* src_type, const TypePtr* dst_type) {
  if (src_type == dst_type) {
    // Types are equal, we don't need a MergeMemNode
    return memory(src_type);
  }
  MergeMemNode* merge = MergeMemNode::make(map()->memory());