7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
28 #include "gc/g1/heapRegion.hpp"
29 #include "gc/shared/barrierSet.hpp"
30 #include "gc/shared/cardTableModRefBS.hpp"
31 #include "gc/shared/collectedHeap.hpp"
32 #include "memory/resourceArea.hpp"
33 #include "opto/addnode.hpp"
34 #include "opto/castnode.hpp"
35 #include "opto/convertnode.hpp"
36 #include "opto/graphKit.hpp"
37 #include "opto/idealKit.hpp"
38 #include "opto/intrinsicnode.hpp"
39 #include "opto/locknode.hpp"
40 #include "opto/machnode.hpp"
41 #include "opto/opaquenode.hpp"
42 #include "opto/parse.hpp"
43 #include "opto/rootnode.hpp"
44 #include "opto/runtime.hpp"
45 #include "runtime/deoptimization.hpp"
46 #include "runtime/sharedRuntime.hpp"
47
585 }
586 break;
587 }
588 if (failing()) { stop(); return; } // exception allocation might fail
589 if (ex_obj != NULL) {
590 // Cheat with a preallocated exception object.
591 if (C->log() != NULL)
592 C->log()->elem("hot_throw preallocated='1' reason='%s'",
593 Deoptimization::trap_reason_name(reason));
594 const TypeInstPtr* ex_con = TypeInstPtr::make(ex_obj);
595 Node* ex_node = _gvn.transform(ConNode::make(ex_con));
596
597 // Clear the detail message of the preallocated exception object.
598 // Weblogic sometimes mutates the detail message of exceptions
599 // using reflection.
600 int offset = java_lang_Throwable::get_detailMessage_offset();
601 const TypePtr* adr_typ = ex_con->add_offset(offset);
602
603 Node *adr = basic_plus_adr(ex_node, ex_node, offset);
604 const TypeOopPtr* val_type = TypeOopPtr::make_from_klass(env()->String_klass());
605 // Conservatively release stores of object references.
606 Node *store = store_oop_to_object(control(), ex_node, adr, adr_typ, null(), val_type, T_OBJECT, MemNode::release);
607
608 add_exception_state(make_exception_state(ex_node));
609 return;
610 }
611 }
612
613 // %%% Maybe add entry to OptoRuntime which directly throws the exc.?
614 // It won't be much cheaper than bailing to the interp., since we'll
615 // have to pass up all the debug-info, and the runtime will have to
616 // create the stack trace.
617
618 // Usual case: Bail to interpreter.
619 // Reserve the right to recompile if we haven't seen anything yet.
620
621 ciMethod* m = Deoptimization::reason_is_speculate(reason) ? C->method() : NULL;
622 Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile;
623 if (treat_throw_as_hot
624 && (method()->method_data()->trap_recompiled_at(bci(), m)
625 || C->too_many_traps(reason))) {
626 // We cannot afford to take more traps here. Suffer in the interpreter.
1500 st = StoreDNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
1501 } else {
1502 st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
1503 }
1504 if (unaligned) {
1505 st->as_Store()->set_unaligned_access();
1506 }
1507 if (mismatched) {
1508 st->as_Store()->set_mismatched_access();
1509 }
1510 st = _gvn.transform(st);
1511 set_memory(st, adr_idx);
1512   // Back-to-back stores can only remove an intermediate store with DU info,
1513   // so push on the worklist for the optimizer.
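  // (Illustrative sketch, not from the original source: for "p.f = a; p.f = b;"
  //  the second store makes the first dead, and with use-def info available
  //  IGVN can remove the intermediate store.)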
1514 if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1515 record_for_igvn(st);
1516
1517 return st;
1518 }
1519
1520
1521 void GraphKit::pre_barrier(bool do_load,
1522 Node* ctl,
1523 Node* obj,
1524 Node* adr,
1525 uint adr_idx,
1526 Node* val,
1527 const TypeOopPtr* val_type,
1528 Node* pre_val,
1529 BasicType bt) {
1530
1531 BarrierSet* bs = Universe::heap()->barrier_set();
1532 set_control(ctl);
1533 switch (bs->kind()) {
1534 case BarrierSet::G1SATBCTLogging:
1535 g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
1536 break;
1537
1538 case BarrierSet::CardTableForRS:
1539 case BarrierSet::CardTableExtension:
1540 case BarrierSet::ModRef:
1541 break;
1542
1543 default :
1544 ShouldNotReachHere();
1545
1546 }
1547 }
1548
1549 bool GraphKit::can_move_pre_barrier() const {
1550 BarrierSet* bs = Universe::heap()->barrier_set();
1551 switch (bs->kind()) {
1552 case BarrierSet::G1SATBCTLogging:
1553 return true; // Can move it if no safepoint
1554
1555 case BarrierSet::CardTableForRS:
1556 case BarrierSet::CardTableExtension:
1557 case BarrierSet::ModRef:
1558 return true; // There is no pre-barrier
1559
1560 default :
1561 ShouldNotReachHere();
1562 }
1563 return false;
1564 }
1565
1566 void GraphKit::post_barrier(Node* ctl,
1567 Node* store,
1568 Node* obj,
1569 Node* adr,
1570 uint adr_idx,
1571 Node* val,
1572 BasicType bt,
1573 bool use_precise) {
1574 BarrierSet* bs = Universe::heap()->barrier_set();
1575 set_control(ctl);
1576 switch (bs->kind()) {
1577 case BarrierSet::G1SATBCTLogging:
1578 g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
1579 break;
1580
1581 case BarrierSet::CardTableForRS:
1582 case BarrierSet::CardTableExtension:
1583 write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
1584 break;
1585
1586 case BarrierSet::ModRef:
1587 break;
1588
1589 default :
1590 ShouldNotReachHere();
1591
1592 }
1593 }
1594
1595 Node* GraphKit::store_oop(Node* ctl,
1596 Node* obj,
1597 Node* adr,
1598 const TypePtr* adr_type,
1599 Node* val,
1600 const TypeOopPtr* val_type,
1601 BasicType bt,
1602 bool use_precise,
1603 MemNode::MemOrd mo,
1604 bool mismatched) {
1605 // Transformation of a value which could be a NULL pointer (CastPP #NULL)
1606 // could be delayed during Parse (for example, in adjust_map_after_if()).
1607 // Execute transformation here to avoid barrier generation in such case.
1608 if (_gvn.type(val) == TypePtr::NULL_PTR)
1609 val = _gvn.makecon(TypePtr::NULL_PTR);
1610
1611 set_control(ctl);
1612 if (stopped()) return top(); // Dead path ?
1613
1614 assert(bt == T_OBJECT, "sanity");
1615 assert(val != NULL, "not dead path");
1616 uint adr_idx = C->get_alias_index(adr_type);
1617 assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1618
1619 pre_barrier(true /* do_load */,
1620 control(), obj, adr, adr_idx, val, val_type,
1621 NULL /* pre_val */,
1622 bt);
1623
1624 Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo, mismatched);
1625 post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
1626 return store;
1627 }
1628
1629 // Could be an array or object we don't know at compile time (unsafe ref.)
1630 Node* GraphKit::store_oop_to_unknown(Node* ctl,
1631 Node* obj, // containing obj
1632 Node* adr, // actual address to store val at
1633 const TypePtr* adr_type,
1634 Node* val,
1635 BasicType bt,
1636 MemNode::MemOrd mo,
1637 bool mismatched) {
1638 Compile::AliasType* at = C->alias_type(adr_type);
1639 const TypeOopPtr* val_type = NULL;
1640 if (adr_type->isa_instptr()) {
1641 if (at->field() != NULL) {
1642 // known field. This code is a copy of the do_put_xxx logic.
1643 ciField* field = at->field();
1644 if (!field->type()->is_loaded()) {
1645 val_type = TypeInstPtr::BOTTOM;
1646 } else {
1647 val_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
1648 }
1649 }
1650 } else if (adr_type->isa_aryptr()) {
1651 val_type = adr_type->is_aryptr()->elem()->make_oopptr();
1652 }
1653 if (val_type == NULL) {
1654 val_type = TypeInstPtr::BOTTOM;
1655 }
1656 return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo, mismatched);
1657 }
1658
1659
1660 //-------------------------array_element_address-------------------------
1661 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1662 const TypeInt* sizetype, Node* ctrl) {
1663 uint shift = exact_log2(type2aelembytes(elembt));
1664 uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1665
1666 // short-circuit a common case (saves lots of confusing waste motion)
1667 jint idx_con = find_int_con(idx, -1);
1668 if (idx_con >= 0) {
1669 intptr_t offset = header + ((intptr_t)idx_con << shift);
1670 return basic_plus_adr(ary, offset);
1671 }
1672
1673 // must be correct type for alignment purposes
1674 Node* base = basic_plus_adr(ary, header);
1675 idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
1676 Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
1677 return basic_plus_adr(ary, base, scale);
1678 }
3741 C->add_predicate_opaq(opq);
3742 {
3743 PreserveJVMState pjvms(this);
3744 set_control(iffalse);
3745 inc_sp(nargs);
3746 uncommon_trap(reason, Deoptimization::Action_maybe_recompile);
3747 }
3748 Node* iftrue = _gvn.transform(new IfTrueNode(iff));
3749 set_control(iftrue);
3750 }
3751
3752 //------------------------------add_predicate---------------------------------
3753 void GraphKit::add_predicate(int nargs) {
3754 if (UseLoopPredicate) {
3755 add_predicate_impl(Deoptimization::Reason_predicate, nargs);
3756 }
3757 // loop's limit check predicate should be near the loop.
3758 add_predicate_impl(Deoptimization::Reason_loop_limit_check, nargs);
3759 }
3760
3761 //----------------------------- store barriers ----------------------------
3762 #define __ ideal.
3763
3764 void GraphKit::sync_kit(IdealKit& ideal) {
3765 set_all_memory(__ merged_memory());
3766 set_i_o(__ i_o());
3767 set_control(__ ctrl());
3768 }
3769
3770 void GraphKit::final_sync(IdealKit& ideal) {
3771 // Final sync IdealKit and graphKit.
3772 sync_kit(ideal);
3773 }
3774
3775 Node* GraphKit::byte_map_base_node() {
3776 // Get base of card map
3777 CardTableModRefBS* ct =
3778 barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
3779 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust users of this code");
3780 if (ct->byte_map_base != NULL) {
3781 return makecon(TypeRawPtr::make((address)ct->byte_map_base));
3782 } else {
3783 return null();
3784 }
3785 }
3786
3787 // vanilla/CMS post barrier
3788 // Insert a write-barrier store. This is to let generational GC work; we have
3789 // to flag all oop-stores before the next GC point.
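// The code emitted below has roughly this shape (an illustrative sketch,
// not the actual IR; "dirty" is the dirty card value, 0):
//
//   jbyte* card = byte_map_base + ((uintptr_t)adr >> card_shift);
//   if (!UseCondCardMark || *card != dirty) {
//     *card = dirty;   // for CMS this is a storeCM, ordered with the oop store
//   }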
3790 void GraphKit::write_barrier_post(Node* oop_store,
3791 Node* obj,
3792 Node* adr,
3793 uint adr_idx,
3794 Node* val,
3795 bool use_precise) {
3796 // No store check needed if we're storing a NULL or an old object
3797 // (the latter case is probably a string constant). The concurrent
3798 // mark-sweep garbage collector, however, needs to have all non-NULL
3799 // oop updates flagged via card-marks.
3800 if (val != NULL && val->is_Con()) {
3801 // must be either an oop or NULL
3802 const Type* t = val->bottom_type();
3803 if (t == TypePtr::NULL_PTR || t == Type::TOP)
3804 // stores of null never (?) need barriers
3805 return;
3806 }
3807
3808 if (use_ReduceInitialCardMarks()
3809 && obj == just_allocated_object(control())) {
3810 // We can skip marks on a freshly-allocated object in Eden.
3811 // Keep this code in sync with new_store_pre_barrier() in runtime.cpp.
3812 // That routine informs GC to take appropriate compensating steps,
3813 // upon a slow-path allocation, so as to make this card-mark
3814 // elision safe.
3815 return;
3816 }
3817
3818 if (!use_precise) {
3819 // All card marks for a (non-array) instance are in one place:
3820 adr = obj;
3821 }
3822 // (Else it's an array (or unknown), and we want more precise card marks.)
3823 assert(adr != NULL, "");
3824
3825 IdealKit ideal(this, true);
3826
3827 // Convert the pointer to an int prior to doing math on it
3828 Node* cast = __ CastPX(__ ctrl(), adr);
3829
3830 // Divide by card size
3831 assert(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef),
3832 "Only one we handle so far.");
3833 Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
3834
3835 // Combine card table base and card offset
3836 Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset );
3837
3838 // Get the alias_index for raw card-mark memory
3839 int adr_type = Compile::AliasIdxRaw;
3840 Node* zero = __ ConI(0); // Dirty card value
3841 BasicType bt = T_BYTE;
3842
3843 if (UseConcMarkSweepGC && UseCondCardMark) {
3844 insert_mem_bar(Op_MemBarVolatile); // StoreLoad barrier
3845 __ sync_kit(this);
3846 }
3847
3848 if (UseCondCardMark) {
3849 // The classic GC reference write barrier is typically implemented
3850 // as a store into the global card mark table. Unfortunately
3851 // unconditional stores can result in false sharing and excessive
3852 // coherence traffic as well as false transactional aborts.
3853 // UseCondCardMark enables MP "polite" conditional card mark
3854 // stores. In theory we could relax the load from ctrl() to
3855 // no_ctrl, but that doesn't buy much latitude.
3856 Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, bt, adr_type);
3857 __ if_then(card_val, BoolTest::ne, zero);
3858 }
3859
3860 // Smash zero into card
3861 if( !UseConcMarkSweepGC ) {
3862 __ store(__ ctrl(), card_adr, zero, bt, adr_type, MemNode::unordered);
3863 } else {
3864 // Specialized path for CM store barrier
3865 __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
3866 }
3867
3868 if (UseCondCardMark) {
3869 __ end_if();
3870 }
3871
3872 // Final sync IdealKit and GraphKit.
3873 final_sync(ideal);
3874 }
3875 /*
3876 * Determine if the G1 pre-barrier can be removed. The pre-barrier is
3877 * required by SATB to make sure all objects live at the start of the
3878 * marking are kept alive: every reference update needs to record the
3879 * previous reference value before it is overwritten.
3880 *
3881 * If the previous value is NULL there is no need to save the old value.
3882 * References that are NULL are filtered during runtime by the barrier
3883 * code to avoid unnecessary queuing.
3884 *
3885 * However, in the case of newly allocated objects it might be possible to
3886 * prove at compile time that the reference about to be overwritten is
3887 * NULL, and avoid adding the barrier code completely.
3888 *
3889 * The compiler needs to determine that the object in which a field is about
3890 * to be written is newly allocated, and that no prior store to the same field
3891 * has happened since the allocation.
3892 *
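 * Illustrative example (a sketch, not part of the original comment):
 *
 *   Foo q = new Foo();   // no safepoint between the allocation ...
 *   q.field = p;         // ... and this first store to q.field
 *
 * Here the previous value of q.field is provably NULL, so there is nothing
 * to log and the pre-barrier can be elided.
 *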
3893 * Returns true if the pre-barrier can be removed
3894 */
3895 bool GraphKit::g1_can_remove_pre_barrier(PhaseTransform* phase, Node* adr,
3896 BasicType bt, uint adr_idx) {
3897 intptr_t offset = 0;
3898 Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
3899 AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);
3900
3901 if (offset == Type::OffsetBot) {
3902 return false; // cannot unalias unless there are precise offsets
3903 }
3904
3905 if (alloc == NULL) {
3906 return false; // No allocation found
3907 }
3908
3909 intptr_t size_in_bytes = type2aelembytes(bt);
3910
3911 Node* mem = memory(adr_idx); // start searching here...
3912
3913 for (int cnt = 0; cnt < 50; cnt++) {
3914
3915 if (mem->is_Store()) {
3916
3917 Node* st_adr = mem->in(MemNode::Address);
3918 intptr_t st_offset = 0;
3919 Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);
3920
3921 if (st_base == NULL) {
3922 break; // inscrutable pointer
3923 }
3924
3925 // We have found a store with the same base and offset as ours, so break.
3926 if (st_base == base && st_offset == offset) {
3927 break;
3928 }
3929
3930 if (st_offset != offset && st_offset != Type::OffsetBot) {
3931 const int MAX_STORE = BytesPerLong;
3932 if (st_offset >= offset + size_in_bytes ||
3933 st_offset <= offset - MAX_STORE ||
3934 st_offset <= offset - mem->as_Store()->memory_size()) {
3935 // Success: The offsets are provably independent.
3936 // (You may ask, why not just test st_offset != offset and be done?
3937 // The answer is that stores of different sizes can co-exist
3938 // in the same sequence of RawMem effects. We sometimes initialize
3939 // a whole 'tile' of array elements with a single jint or jlong.)
3940 mem = mem->in(MemNode::Memory);
3941 continue; // advance through independent store memory
3942 }
3943 }
3944
3945 if (st_base != base
3946 && MemNode::detect_ptr_independence(base, alloc, st_base,
3947 AllocateNode::Ideal_allocation(st_base, phase),
3948 phase)) {
3949 // Success: The bases are provably independent.
3950 mem = mem->in(MemNode::Memory);
3951 continue; // advance through independent store memory
3952 }
3953 } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
3954
3955 InitializeNode* st_init = mem->in(0)->as_Initialize();
3956 AllocateNode* st_alloc = st_init->allocation();
3957
3958 // Make sure that we are looking at the same allocation site.
3959 // The alloc variable is guaranteed not to be null here from the earlier check.
3960 if (alloc == st_alloc) {
3961 // Check that the initialization is storing NULL, so that no previous store
3962 // has been moved up to write a reference directly into the field.
3963 Node* captured_store = st_init->find_captured_store(offset,
3964 type2aelembytes(T_OBJECT),
3965 phase);
3966 if (captured_store == NULL || captured_store == st_init->zero_memory()) {
3967 return true;
3968 }
3969 }
3970 }
3971
3972 // Unless there is an explicit 'continue', we must bail out here,
3973 // because 'mem' is an inscrutable memory state (e.g., a call).
3974 break;
3975 }
3976
3977 return false;
3978 }
3979
3980 // G1 pre/post barriers
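// The emitted pre-barrier has roughly this shape (an illustrative sketch of
// the IdealKit code below; field names are descriptive, not real accessors):
//
//   if (thread->satb_mark_queue_active != 0) {     // marking in progress?
//     if (do_load) pre_val = *adr;                 // load the previous value
//     if (pre_val != NULL) {
//       if (index != 0) {                          // room left in the buffer?
//         index -= sizeof(intptr_t);
//         buffer[index] = pre_val;                 // log the previous value
//       } else {
//         g1_wb_pre(pre_val, thread);              // leaf call: buffer is full
//       }
//     }
//   }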
3981 void GraphKit::g1_write_barrier_pre(bool do_load,
3982 Node* obj,
3983 Node* adr,
3984 uint alias_idx,
3985 Node* val,
3986 const TypeOopPtr* val_type,
3987 Node* pre_val,
3988 BasicType bt) {
3989
3990 // Some sanity checks
3991 // Note: val is unused in this routine.
3992
3993 if (do_load) {
3994 // We need to generate the load of the previous value
3995 assert(obj != NULL, "must have a base");
3996 assert(adr != NULL, "where are we loading from?");
3997 assert(pre_val == NULL, "loaded already?");
3998 assert(val_type != NULL, "need a type");
3999
4000 if (use_ReduceInitialCardMarks()
4001 && g1_can_remove_pre_barrier(&_gvn, adr, bt, alias_idx)) {
4002 return;
4003 }
4004
4005 } else {
4006 // In this case both val_type and alias_idx are unused.
4007 assert(pre_val != NULL, "must be loaded already");
4008 // Nothing to be done if pre_val is null.
4009 if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
4010 assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
4011 }
4012 assert(bt == T_OBJECT, "or we shouldn't be here");
4013
4014 IdealKit ideal(this, true);
4015
4016 Node* tls = __ thread(); // ThreadLocalStorage
4017
4018 Node* no_ctrl = NULL;
4019 Node* no_base = __ top();
4020 Node* zero = __ ConI(0);
4021 Node* zeroX = __ ConX(0);
4022
4023 float likely = PROB_LIKELY(0.999);
4024 float unlikely = PROB_UNLIKELY(0.999);
4025
4026 BasicType active_type = in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
4027 assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 || in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "flag width");
4028
4029 // Offsets into the thread
4030 const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() + // 648
4031 SATBMarkQueue::byte_offset_of_active());
4032 const int index_offset = in_bytes(JavaThread::satb_mark_queue_offset() + // 656
4033 SATBMarkQueue::byte_offset_of_index());
4034 const int buffer_offset = in_bytes(JavaThread::satb_mark_queue_offset() + // 652
4035 SATBMarkQueue::byte_offset_of_buf());
4036
4037 // Now the actual pointers into the thread
4038 Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
4039 Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
4040 Node* index_adr = __ AddP(no_base, tls, __ ConX(index_offset));
4041
4042 // Now some of the values
4043 Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);
4044
4045 // if (marking != 0)
4046 __ if_then(marking, BoolTest::ne, zero, unlikely); {
4047 BasicType index_bt = TypeX_X->basic_type();
4048 assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 SATBMarkQueue::_index with wrong size.");
4049 Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
4050
4051 if (do_load) {
4052 // load original value
4053 // alias_idx correct??
4054 pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
4055 }
4056
4057 // if (pre_val != NULL)
4058 __ if_then(pre_val, BoolTest::ne, null()); {
4059 Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
4060
4061 // is the queue for this thread full?
4062 __ if_then(index, BoolTest::ne, zeroX, likely); {
4063
4064 // decrement the index
4065 Node* next_index = _gvn.transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));
4066
4067 // Now get the buffer location we will log the previous value into and store it
4068 Node *log_addr = __ AddP(no_base, buffer, next_index);
4069 __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
4070 // update the index
4071 __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);
4072
4073 } __ else_(); {
4074
4075 // logging buffer is full, call the runtime
4076 const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
4077 __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", pre_val, tls);
4078 } __ end_if(); // (index != 0)
4079 } __ end_if(); // (pre_val != NULL)
4080 } __ end_if(); // (marking != 0)
4081
4082 // Final sync IdealKit and GraphKit.
4083 final_sync(ideal);
4084 }
4085
4086 /*
4087 * G1, like any GC with a young generation, requires a way to keep track of
4088 * references from the old generation to the young generation to make sure all
4089 * live objects are found. G1 also needs to keep track of object references
4090 * between different regions to enable evacuation of old regions, which is done
4091 * as part of mixed collections. References are tracked in remembered sets,
4092 * which are continuously updated as references are written, with the help of
4093 * the post-barrier.
4094 *
4095 * To reduce the number of updates to the remembered set, the post-barrier
4096 * filters out updates to fields in objects located in the young generation,
4097 * in the same region as the reference, when NULL is being written, or
4098 * when the card is already marked as dirty by an earlier write.
4099 *
4100 * Under certain circumstances it is possible to avoid generating the
4101 * post-barrier completely if it is possible during compile time to prove
4102 * the object is newly allocated and that no safepoint exists between the
4103 * allocation and the store.
4104 *
4105 * In the case of slow allocation, the allocation code must handle the barrier
4106 * as part of the allocation when the allocated object is not located in the
4107 * nursery; this happens for humongous objects. This is similar to
4108 * how CMS is required to handle this case, see the comments for the method
4109 * CollectedHeap::new_store_pre_barrier and OptoRuntime::new_store_pre_barrier.
4110 * A deferred card mark is required for these objects and handled in the above
4111 * mentioned methods.
4112 *
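 * Illustrative example (a sketch, not part of the original comment):
 *
 *   Foo q = new Foo();   // allocation, no safepoint before the store
 *   q.field = p;         // store captured by the object's initialization
 *
 * Here any required card mark is handled by the allocation path, so the
 * post-barrier can be elided.
 *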
4113 * Returns true if the post barrier can be removed
4114 */
4115 bool GraphKit::g1_can_remove_post_barrier(PhaseTransform* phase, Node* store,
4116 Node* adr) {
4117 intptr_t offset = 0;
4118 Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
4119 AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);
4120
4121 if (offset == Type::OffsetBot) {
4122 return false; // cannot unalias unless there are precise offsets
4123 }
4124
4125 if (alloc == NULL) {
4126 return false; // No allocation found
4127 }
4128
4129 // Start search from Store node
4130 Node* mem = store->in(MemNode::Control);
4131 if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
4132
4133 InitializeNode* st_init = mem->in(0)->as_Initialize();
4134 AllocateNode* st_alloc = st_init->allocation();
4135
4136 // Make sure we are looking at the same allocation
4137 if (alloc == st_alloc) {
4138 return true;
4139 }
4140 }
4141
4142 return false;
4143 }
4144
4145 //
4146 // Update the card table and add card address to the queue
4147 //
4148 void GraphKit::g1_mark_card(IdealKit& ideal,
4149 Node* card_adr,
4150 Node* oop_store,
4151 uint oop_alias_idx,
4152 Node* index,
4153 Node* index_adr,
4154 Node* buffer,
4155 const TypeFunc* tf) {
4156
4157 Node* zero = __ ConI(0);
4158 Node* zeroX = __ ConX(0);
4159 Node* no_base = __ top();
4160 BasicType card_bt = T_BYTE;
4161 // Smash zero into card. MUST BE ORDERED WRT TO STORE
4162 __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);
4163
4164 // Now do the queue work
4165 __ if_then(index, BoolTest::ne, zeroX); {
4166
4167 Node* next_index = _gvn.transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));
4168 Node* log_addr = __ AddP(no_base, buffer, next_index);
4169
4170 // Order, see storeCM.
4171 __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
4172 __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, MemNode::unordered);
4173
4174 } __ else_(); {
4175 __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
4176 } __ end_if();
4177
4178 }
4179
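// The emitted post-barrier has roughly this shape (an illustrative sketch of
// the IdealKit code below, for the case where val is known):
//
//   if ((((uintptr_t)adr ^ (uintptr_t)val) >> LogOfHRGrainBytes) != 0  // cross-region?
//       && val != NULL) {
//     jbyte* card = byte_map_base + ((uintptr_t)adr >> card_shift);
//     if (*card != g1_young_card_val) {
//       StoreLoad barrier;                  // then re-examine the card
//       if (*card != dirty_card_val) {
//         *card = dirty_card_val;           // ordered storeCM
//         enqueue the card address, calling g1_wb_post when the queue is full;
//       }
//     }
//   }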
4180 void GraphKit::g1_write_barrier_post(Node* oop_store,
4181 Node* obj,
4182 Node* adr,
4183 uint alias_idx,
4184 Node* val,
4185 BasicType bt,
4186 bool use_precise) {
4187 // If we are writing a NULL then we need no post barrier
4188
4189 if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
4190 // Must be NULL
4191 const Type* t = val->bottom_type();
4192 assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
4193 // No post barrier if writing NULL
4194 return;
4195 }
4196
4197 if (use_ReduceInitialCardMarks() && obj == just_allocated_object(control())) {
4198 // We can skip marks on a freshly-allocated object in Eden.
4199 // Keep this code in sync with new_store_pre_barrier() in runtime.cpp.
4200 // That routine informs GC to take appropriate compensating steps,
4201 // upon a slow-path allocation, so as to make this card-mark
4202 // elision safe.
4203 return;
4204 }
4205
4206 if (use_ReduceInitialCardMarks()
4207 && g1_can_remove_post_barrier(&_gvn, oop_store, adr)) {
4208 return;
4209 }
4210
4211 if (!use_precise) {
4212 // All card marks for a (non-array) instance are in one place:
4213 adr = obj;
4214 }
4215 // (Else it's an array (or unknown), and we want more precise card marks.)
4216 assert(adr != NULL, "");
4217
4218 IdealKit ideal(this, true);
4219
4220 Node* tls = __ thread(); // ThreadLocalStorage
4221
4222 Node* no_base = __ top();
4223 float likely = PROB_LIKELY(0.999);
4224 float unlikely = PROB_UNLIKELY(0.999);
4225 Node* young_card = __ ConI((jint)G1SATBCardTableModRefBS::g1_young_card_val());
4226 Node* dirty_card = __ ConI((jint)CardTableModRefBS::dirty_card_val());
4227 Node* zeroX = __ ConX(0);
4228
4229 // Get the alias_index for raw card-mark memory
4230 const TypePtr* card_type = TypeRawPtr::BOTTOM;
4231
4232 const TypeFunc *tf = OptoRuntime::g1_wb_post_Type();
4233
4234 // Offsets into the thread
4235 const int index_offset = in_bytes(JavaThread::dirty_card_queue_offset() +
4236 DirtyCardQueue::byte_offset_of_index());
4237 const int buffer_offset = in_bytes(JavaThread::dirty_card_queue_offset() +
4238 DirtyCardQueue::byte_offset_of_buf());
4239
4240 // Pointers into the thread
4241
4242 Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
4243 Node* index_adr = __ AddP(no_base, tls, __ ConX(index_offset));
4244
4245 // Now some values
4246 // Use ctrl to avoid hoisting these values past a safepoint, which could
4247 // potentially reset these fields in the JavaThread.
4248 Node* index = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
4249 Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
4250
4251 // Convert the store obj pointer to an int prior to doing math on it
4252 // Must use ctrl to prevent "integerized oop" existing across safepoint
4253 Node* cast = __ CastPX(__ ctrl(), adr);
4254
4255 // Divide pointer by card size
4256 Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
4257
4258 // Combine card table base and card offset
4259 Node* card_adr = __ AddP(no_base, byte_map_base_node(), card_offset );
4260
4261 // If we know the value being stored, check whether it crosses regions.
4262
4263 if (val != NULL) {
4264 // Does the store cause us to cross regions?
4265
4266 // Should be able to do an unsigned compare of region_size instead of
4267 // an extra shift. Do we have an unsigned compare??
4268 // Node* region_size = __ ConI(1 << HeapRegion::LogOfHRGrainBytes);
4269 Node* xor_res = __ URShiftX ( __ XorX( cast, __ CastPX(__ ctrl(), val)), __ ConI(HeapRegion::LogOfHRGrainBytes));
4270
4271 // if (xor_res == 0) same region so skip
4272 __ if_then(xor_res, BoolTest::ne, zeroX); {
4273
4274 // No barrier if we are storing a NULL
4275 __ if_then(val, BoolTest::ne, null(), unlikely); {
4276
4277 // Ok must mark the card if not already dirty
4278
4279 // load the original value of the card
4280 Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
4281
4282 __ if_then(card_val, BoolTest::ne, young_card); {
4283 sync_kit(ideal);
4284 // Use Op_MemBarVolatile to achieve the effect of a StoreLoad barrier.
4285 insert_mem_bar(Op_MemBarVolatile, oop_store);
4286 __ sync_kit(this);
4287
4288 Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
4289 __ if_then(card_val_reload, BoolTest::ne, dirty_card); {
4290 g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
4291 } __ end_if();
4292 } __ end_if();
4293 } __ end_if();
4294 } __ end_if();
4295 } else {
4296 // The Object.clone() intrinsic uses this path if !ReduceInitialCardMarks.
4297 // We don't need a barrier here if the destination is a newly allocated object
4298 // in Eden. Otherwise, GC verification breaks because we assume that cards in Eden
4299 // are set to 'g1_young_gen' (see G1SATBCardTableModRefBS::verify_g1_young_region()).
4300 assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");
4301 Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
4302 __ if_then(card_val, BoolTest::ne, young_card); {
4303 g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
4304 } __ end_if();
4305 }
4306
4307 // Final sync IdealKit and GraphKit.
4308 final_sync(ideal);
4309 }
4310 #undef __
4311
4312
4313 Node* GraphKit::load_String_length(Node* ctrl, Node* str) {
4314 Node* len = load_array_length(load_String_value(ctrl, str));
4315 Node* coder = load_String_coder(ctrl, str);
4316 // Divide length by 2 if coder is UTF16
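4317 // (CODER_LATIN1 is 0 and CODER_UTF16 is 1, so "len >> coder" turns the
4317 // byte-array length into the character count.)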
4317 return _gvn.transform(new RShiftINode(len, coder));
4318 }
4319
4320 Node* GraphKit::load_String_value(Node* ctrl, Node* str) {
4321 int value_offset = java_lang_String::value_offset_in_bytes();
4322 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4323 false, NULL, 0);
4324 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4325 const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
4326 TypeAry::make(TypeInt::BYTE, TypeInt::POS),
4327 ciTypeArrayKlass::make(T_BYTE), true, 0);
4328 int value_field_idx = C->get_alias_index(value_field_type);
4329 Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset),
4330 value_type, T_OBJECT, value_field_idx, MemNode::unordered);
4331 // String.value field is known to be @Stable.
4332 if (UseImplicitStableValues) {
4333 load = cast_array_to_stable(load, value_type);
4334 }
4335 return load;
4336 }
4337
4338 Node* GraphKit::load_String_coder(Node* ctrl, Node* str) {
4339 if (!CompactStrings) {
4340 return intcon(java_lang_String::CODER_UTF16);
4341 }
4342 int coder_offset = java_lang_String::coder_offset_in_bytes();
4343 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4344 false, NULL, 0);
4345 const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4346 int coder_field_idx = C->get_alias_index(coder_field_type);
4347 return make_load(ctrl, basic_plus_adr(str, str, coder_offset),
4348 TypeInt::BYTE, T_BYTE, coder_field_idx, MemNode::unordered);
4349 }
4350
4351 void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) {
4352 int value_offset = java_lang_String::value_offset_in_bytes();
4353 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4354 false, NULL, 0);
4355 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4356 store_oop_to_object(ctrl, str, basic_plus_adr(str, value_offset), value_field_type,
4357 value, TypeAryPtr::BYTES, T_OBJECT, MemNode::unordered);
4358 }
4359
4360 void GraphKit::store_String_coder(Node* ctrl, Node* str, Node* value) {
4361 int coder_offset = java_lang_String::coder_offset_in_bytes();
4362 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4363 false, NULL, 0);
4364 const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4365 int coder_field_idx = C->get_alias_index(coder_field_type);
4366 store_to_memory(ctrl, basic_plus_adr(str, coder_offset),
4367 value, T_BYTE, coder_field_idx, MemNode::unordered);
4368 }
4369
4370 // Capture src and dst memory state with a MergeMemNode
4371 Node* GraphKit::capture_memory(const TypePtr* src_type, const TypePtr* dst_type) {
4372 if (src_type == dst_type) {
4373 // Types are equal, we don't need a MergeMemNode
4374 return memory(src_type);
4375 }
4376 MergeMemNode* merge = MergeMemNode::make(map()->memory());
4377 record_for_igvn(merge); // fold it up later, if possible
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "gc/g1/heapRegion.hpp"
28 #include "gc/shared/barrierSet.hpp"
29 #include "gc/shared/cardTableModRefBS.hpp"
30 #include "gc/shared/collectedHeap.hpp"
31 #include "memory/resourceArea.hpp"
32 #include "opto/addnode.hpp"
33 #include "opto/castnode.hpp"
34 #include "opto/convertnode.hpp"
35 #include "opto/graphKit.hpp"
36 #include "opto/idealKit.hpp"
37 #include "opto/intrinsicnode.hpp"
38 #include "opto/locknode.hpp"
39 #include "opto/machnode.hpp"
40 #include "opto/opaquenode.hpp"
41 #include "opto/parse.hpp"
42 #include "opto/rootnode.hpp"
43 #include "opto/runtime.hpp"
44 #include "runtime/deoptimization.hpp"
45 #include "runtime/sharedRuntime.hpp"
46
584 }
585 break;
586 }
587 if (failing()) { stop(); return; } // exception allocation might fail
588 if (ex_obj != NULL) {
589 // Cheat with a preallocated exception object.
590 if (C->log() != NULL)
591 C->log()->elem("hot_throw preallocated='1' reason='%s'",
592 Deoptimization::trap_reason_name(reason));
593 const TypeInstPtr* ex_con = TypeInstPtr::make(ex_obj);
594 Node* ex_node = _gvn.transform(ConNode::make(ex_con));
595
596 // Clear the detail message of the preallocated exception object.
597 // Weblogic sometimes mutates the detail message of exceptions
598 // using reflection.
599 int offset = java_lang_Throwable::get_detailMessage_offset();
600 const TypePtr* adr_typ = ex_con->add_offset(offset);
601
602 Node *adr = basic_plus_adr(ex_node, ex_node, offset);
603 const TypeOopPtr* val_type = TypeOopPtr::make_from_klass(env()->String_klass());
604 Node *store = access_store_at(control(), ex_node, adr, adr_typ, null(), val_type, T_OBJECT, C2_ACCESS_ON_HEAP);
605
606 add_exception_state(make_exception_state(ex_node));
607 return;
608 }
609 }
610
611 // %%% Maybe add entry to OptoRuntime which directly throws the exc.?
612 // It won't be much cheaper than bailing to the interp., since we'll
613 // have to pass up all the debug-info, and the runtime will have to
614 // create the stack trace.
615
616 // Usual case: Bail to interpreter.
617 // Reserve the right to recompile if we haven't seen anything yet.
618
619 ciMethod* m = Deoptimization::reason_is_speculate(reason) ? C->method() : NULL;
620 Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile;
621 if (treat_throw_as_hot
622 && (method()->method_data()->trap_recompiled_at(bci(), m)
623 || C->too_many_traps(reason))) {
624 // We cannot afford to take more traps here. Suffer in the interpreter.
1498 st = StoreDNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
1499 } else {
1500 st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
1501 }
1502 if (unaligned) {
1503 st->as_Store()->set_unaligned_access();
1504 }
1505 if (mismatched) {
1506 st->as_Store()->set_mismatched_access();
1507 }
1508 st = _gvn.transform(st);
1509 set_memory(st, adr_idx);
1510   // Back-to-back stores can only remove an intermediate store with DU info,
1511   // so push on the worklist for the optimizer.
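  // (Illustrative sketch, not from the original source: for "p.f = a; p.f = b;"
  //  the second store makes the first dead, and with use-def info available
  //  IGVN can remove the intermediate store.)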
1512 if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1513 record_for_igvn(st);
1514
1515 return st;
1516 }
1517
1518 Node* GraphKit::access_store_at(Node* ctl,
1519 Node* obj,
1520 Node* adr,
1521 const TypePtr* adr_type,
1522 Node* val,
1523 const Type* val_type,
1524 BasicType bt,
1525 C2DecoratorSet decorators) {
1526 // Transformation of a value which could be a NULL pointer (CastPP #NULL)
1527 // could be delayed during Parse (for example, in adjust_map_after_if()).
1528 // Execute transformation here to avoid barrier generation in such case.
1529 if (_gvn.type(val) == TypePtr::NULL_PTR) {
1530 val = _gvn.makecon(TypePtr::NULL_PTR);
1531 }
1532
1533 set_control(ctl);
1534 if (stopped()) {
1535 return top(); // Dead path ?
1536 }
1537
1538 assert(val != NULL, "not dead path");
1539
1540 C2BarrierSetCodeGen *code_gen = Universe::heap()->barrier_set()->c2_code_gen();
1541 return code_gen->store_at(this, obj, adr, adr_type, val, val_type, bt, decorators);
1542 }
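
// Illustrative use of access_store_at (a sketch; mirrors the call in
// store_String_value below):
//   access_store_at(ctl, str, basic_plus_adr(str, value_offset),
//                   value_field_type, value, TypeAryPtr::BYTES,
//                   T_OBJECT, C2_ACCESS_ON_HEAP);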
1543
1544 Node* GraphKit::access_load_at(Node* obj, // containing obj
1545 Node* adr, // actual address to store val at
1546 const TypePtr* adr_type,
1547 const Type* val_type,
1548 BasicType bt,
1549 C2DecoratorSet decorators) {
1550 if (stopped()) {
1551 return top(); // Dead path ?
1552 }
1553
1554 C2BarrierSetCodeGen *code_gen = Universe::heap()->barrier_set()->c2_code_gen();
1555 return code_gen->load_at(this, obj, adr, adr_type, val_type, bt, decorators);
1556 }
1557
1558 Node* GraphKit::access_cas_val_at(Node* ctl,
1559 Node* obj,
1560 Node* adr,
1561 const TypePtr* adr_type,
1562 int alias_idx,
1563 Node* expected_val,
1564 Node* new_val,
1565 const Type* value_type,
1566 BasicType bt,
1567 C2DecoratorSet decorators) {
1568 set_control(ctl);
1569 C2BarrierSetCodeGen *code_gen = Universe::heap()->barrier_set()->c2_code_gen();
1570 return code_gen->cas_val_at(this, obj, adr, adr_type, alias_idx, expected_val, new_val, value_type, bt, decorators);
1571 }
1572
1573 Node* GraphKit::access_cas_bool_at(Node* ctl,
1574 Node* obj,
1575 Node* adr,
1576 const TypePtr* adr_type,
1577 int alias_idx,
1578 Node* expected_val,
1579 Node* new_val,
1580 const Type* value_type,
1581 BasicType bt,
1582 C2DecoratorSet decorators) {
1583 set_control(ctl);
1584 C2BarrierSetCodeGen *code_gen = Universe::heap()->barrier_set()->c2_code_gen();
1585 return code_gen->cas_bool_at(this, obj, adr, adr_type, alias_idx, expected_val, new_val, value_type, bt, decorators);
1586 }
1587
1588 Node* GraphKit::access_swap_at(Node* ctl,
1589 Node* obj,
1590 Node* adr,
1591 const TypePtr* adr_type,
1592 int alias_idx,
1593 Node* new_val,
1594 const Type* value_type,
1595 BasicType bt,
1596 C2DecoratorSet decorators) {
1597 set_control(ctl);
1598 C2BarrierSetCodeGen *code_gen = Universe::heap()->barrier_set()->c2_code_gen();
1599 return code_gen->swap_at(this, obj, adr, adr_type, alias_idx, new_val, value_type, bt, decorators);
1600 }
1601
1602 Node* GraphKit::access_fetch_and_add_at(Node* ctl,
1603 Node* obj,
1604 Node* adr,
1605 const TypePtr* adr_type,
1606 int alias_idx,
1607 Node* new_val,
1608 const Type* value_type,
1609 BasicType bt,
1610 C2DecoratorSet decorators) {
1611 set_control(ctl);
1612 C2BarrierSetCodeGen *code_gen = Universe::heap()->barrier_set()->c2_code_gen();
1613 return code_gen->fetch_and_add_at(this, obj, adr, adr_type, alias_idx, new_val, value_type, bt, decorators);
1614 }
1615
1616 void GraphKit::access_clone(Node* ctl, Node* src, Node* dst, Node* size, bool is_array) {
1617 set_control(ctl);
1618 C2BarrierSetCodeGen *code_gen = Universe::heap()->barrier_set()->c2_code_gen();
1619 return code_gen->clone(this, src, dst, size, is_array);
1620 }
1621
1622 //-------------------------array_element_address-------------------------
1623 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1624 const TypeInt* sizetype, Node* ctrl) {
1625 uint shift = exact_log2(type2aelembytes(elembt));
1626 uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1627
1628 // short-circuit a common case (saves lots of confusing waste motion)
1629 jint idx_con = find_int_con(idx, -1);
1630 if (idx_con >= 0) {
1631 intptr_t offset = header + ((intptr_t)idx_con << shift);
1632 return basic_plus_adr(ary, offset);
1633 }
1634
1635 // must be correct type for alignment purposes
1636 Node* base = basic_plus_adr(ary, header);
1637 idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
1638 Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
1639 return basic_plus_adr(ary, base, scale);
1640 }
3703 C->add_predicate_opaq(opq);
3704 {
3705 PreserveJVMState pjvms(this);
3706 set_control(iffalse);
3707 inc_sp(nargs);
3708 uncommon_trap(reason, Deoptimization::Action_maybe_recompile);
3709 }
3710 Node* iftrue = _gvn.transform(new IfTrueNode(iff));
3711 set_control(iftrue);
3712 }
3713
3714 //------------------------------add_predicate---------------------------------
3715 void GraphKit::add_predicate(int nargs) {
3716 if (UseLoopPredicate) {
3717 add_predicate_impl(Deoptimization::Reason_predicate, nargs);
3718 }
3719 // loop's limit check predicate should be near the loop.
3720 add_predicate_impl(Deoptimization::Reason_loop_limit_check, nargs);
3721 }
3722
3723 void GraphKit::sync_kit(IdealKit& ideal) {
3724 set_all_memory(ideal.merged_memory());
3725 set_i_o(ideal.i_o());
3726 set_control(ideal.ctrl());
3727 }
3728
3729 void GraphKit::final_sync(IdealKit& ideal) {
3730 // Final sync IdealKit and graphKit.
3731 sync_kit(ideal);
3732 }
3733
3734 Node* GraphKit::load_String_length(Node* ctrl, Node* str) {
3735 Node* len = load_array_length(load_String_value(ctrl, str));
3736 Node* coder = load_String_coder(ctrl, str);
3737 // Divide length by 2 if coder is UTF16
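3738 // (CODER_LATIN1 is 0 and CODER_UTF16 is 1, so "len >> coder" turns the
3738 // byte-array length into the character count.)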
3738 return _gvn.transform(new RShiftINode(len, coder));
3739 }
3740
3741 Node* GraphKit::load_String_value(Node* ctrl, Node* str) {
3742 int value_offset = java_lang_String::value_offset_in_bytes();
3743 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
3744 false, NULL, 0);
3745 const TypePtr* value_field_type = string_type->add_offset(value_offset);
3746 const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
3747 TypeAry::make(TypeInt::BYTE, TypeInt::POS),
3748 ciTypeArrayKlass::make(T_BYTE), true, 0);
3749 Node* p = basic_plus_adr(str, str, value_offset);
3750 Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT, C2_ACCESS_ON_HEAP);
3751 // String.value field is known to be @Stable.
3752 if (UseImplicitStableValues) {
3753 load = cast_array_to_stable(load, value_type);
3754 }
3755 return load;
3756 }
3757
3758 Node* GraphKit::load_String_coder(Node* ctrl, Node* str) {
3759 if (!CompactStrings) {
3760 return intcon(java_lang_String::CODER_UTF16);
3761 }
3762 int coder_offset = java_lang_String::coder_offset_in_bytes();
3763 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
3764 false, NULL, 0);
3765 const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
3766 int coder_field_idx = C->get_alias_index(coder_field_type);
3767 return make_load(ctrl, basic_plus_adr(str, str, coder_offset),
3768 TypeInt::BYTE, T_BYTE, coder_field_idx, MemNode::unordered);
3769 }
3770
3771 void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) {
3772 int value_offset = java_lang_String::value_offset_in_bytes();
3773 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
3774 false, NULL, 0);
3775 const TypePtr* value_field_type = string_type->add_offset(value_offset);
3776 access_store_at(ctrl, str, basic_plus_adr(str, value_offset), value_field_type,
3777 value, TypeAryPtr::BYTES, T_OBJECT, C2_ACCESS_ON_HEAP);
3778 }
3779
3780 void GraphKit::store_String_coder(Node* ctrl, Node* str, Node* value) {
3781 int coder_offset = java_lang_String::coder_offset_in_bytes();
3782 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
3783 false, NULL, 0);
3784 const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
3785 int coder_field_idx = C->get_alias_index(coder_field_type);
3786 store_to_memory(ctrl, basic_plus_adr(str, coder_offset),
3787 value, T_BYTE, coder_field_idx, MemNode::unordered);
3788 }
3789
3790 // Capture src and dst memory state with a MergeMemNode
3791 Node* GraphKit::capture_memory(const TypePtr* src_type, const TypePtr* dst_type) {
3792 if (src_type == dst_type) {
3793 // Types are equal, we don't need a MergeMemNode
3794 return memory(src_type);
3795 }
3796 MergeMemNode* merge = MergeMemNode::make(map()->memory());
3797 record_for_igvn(merge); // fold it up later, if possible