477 int bci = this->bci();
478 if (method != NULL && bci != InvocationEntryBci)
479 return method->java_code_at_bci(bci);
480 else
481 return Bytecodes::_illegal;
482 }
483
484 void GraphKit::uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
485 bool must_throw) {
486 // if the exception capability is set, then we will generate code
487 // to check the JavaThread.should_post_on_exceptions flag to see
488 // if we actually need to report exception events (for this
489 // thread). If we don't need to report exception events, we will
490 // take the normal fast path provided by add_exception_events. If
491 // exception event reporting is enabled for this thread, we will
492 // take the uncommon_trap in the BuildCutout below.
493
494 // first must access the should_post_on_exceptions_flag in this thread's JavaThread
495 Node* jthread = _gvn.transform(new (C) ThreadLocalNode());
496 Node* adr = basic_plus_adr(top(), jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset()));
497 Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, false);
498
499 // Test the should_post_on_exceptions_flag vs. 0
500 Node* chk = _gvn.transform( new (C) CmpINode(should_post_flag, intcon(0)) );
501 Node* tst = _gvn.transform( new (C) BoolNode(chk, BoolTest::eq) );
502
503 // Branch to slow_path if should_post_on_exceptions_flag was true
504 { BuildCutout unless(this, tst, PROB_MAX);
505 // Do not try anything fancy if we're notifying the VM on every throw.
506 // Cf. case Bytecodes::_athrow in parse2.cpp.
507 uncommon_trap(reason, Deoptimization::Action_none,
508 (ciKlass*)NULL, (char*)NULL, must_throw);
509 }
510
511 }
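// In effect, the graph built above is equivalent to this pseudo-code (sketch
// only; the flag is the per-thread field loaded via ThreadLocalNode):
//
//   if (thread->should_post_on_exceptions_flag != 0) {
//     uncommon_trap(reason, Deoptimization::Action_none);  // interpreter posts the event
//   }
//   // ... otherwise continue on the compiled fast path ...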
512
513 //------------------------------builtin_throw----------------------------------
514 void GraphKit::builtin_throw(Deoptimization::DeoptReason reason, Node* arg) {
515 bool must_throw = true;
516
517 if (env()->jvmti_can_post_on_exceptions()) {
579 }
580 break;
581 }
582 if (failing()) { stop(); return; } // exception allocation might fail
583 if (ex_obj != NULL) {
584 // Cheat with a preallocated exception object.
585 if (C->log() != NULL)
586 C->log()->elem("hot_throw preallocated='1' reason='%s'",
587 Deoptimization::trap_reason_name(reason));
588 const TypeInstPtr* ex_con = TypeInstPtr::make(ex_obj);
589 Node* ex_node = _gvn.transform( ConNode::make(C, ex_con) );
590
591 // Clear the detail message of the preallocated exception object.
592     // WebLogic sometimes mutates the detail message of exceptions
593 // using reflection.
594 int offset = java_lang_Throwable::get_detailMessage_offset();
595 const TypePtr* adr_typ = ex_con->add_offset(offset);
596
597 Node *adr = basic_plus_adr(ex_node, ex_node, offset);
598 const TypeOopPtr* val_type = TypeOopPtr::make_from_klass(env()->String_klass());
599 Node *store = store_oop_to_object(control(), ex_node, adr, adr_typ, null(), val_type, T_OBJECT);
600
601 add_exception_state(make_exception_state(ex_node));
602 return;
603 }
604 }
605
606 // %%% Maybe add entry to OptoRuntime which directly throws the exc.?
607 // It won't be much cheaper than bailing to the interp., since we'll
608 // have to pass up all the debug-info, and the runtime will have to
609 // create the stack trace.
610
611 // Usual case: Bail to interpreter.
612 // Reserve the right to recompile if we haven't seen anything yet.
613
614 Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile;
615 if (treat_throw_as_hot
616 && (method()->method_data()->trap_recompiled_at(bci())
617 || C->too_many_traps(reason))) {
618 // We cannot afford to take more traps here. Suffer in the interpreter.
619 if (C->log() != NULL)
1466 map()->set_memory(mergemem);
1467 }
1468
1469 //------------------------------set_all_memory_call----------------------------
1470 void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
1471 Node* newmem = _gvn.transform( new (C) ProjNode(call, TypeFunc::Memory, separate_io_proj) );
1472 set_all_memory(newmem);
1473 }
1474
1475 //=============================================================================
1476 //
1477 // parser factory methods for MemNodes
1478 //
1479 // These are layered on top of the factory methods in LoadNode and StoreNode,
1480 // and integrate with the parser's memory state and _gvn engine.
1481 //
1482
1483 // factory methods keyed by alias index ("int adr_idx")
1484 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
1485 int adr_idx,
1486 bool require_atomic_access) {
1487 assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1488 const TypePtr* adr_type = NULL; // debug-mode-only argument
1489 debug_only(adr_type = C->get_adr_type(adr_idx));
1490 Node* mem = memory(adr_idx);
1491 Node* ld;
1492 if (require_atomic_access && bt == T_LONG) {
1493 ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t);
1494 } else {
1495 ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt);
1496 }
1497 ld = _gvn.transform(ld);
1498   if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1499 // Improve graph before escape analysis and boxing elimination.
1500 record_for_igvn(ld);
1501 }
1502 return ld;
1503 }
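// Typical use, as in uncommon_trap_if_should_post_on_exceptions() above:
//
//   Node* flag = make_load(control(), adr, TypeInt::INT, T_INT,
//                          Compile::AliasIdxRaw, false);
//
// require_atomic_access matters only for T_LONG, where a plain load is not
// guaranteed atomic on 32-bit platforms (hence LoadLNode::make_atomic).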
1504
1505 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1506 int adr_idx,
1507 bool require_atomic_access) {
1508 assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1509 const TypePtr* adr_type = NULL;
1510 debug_only(adr_type = C->get_adr_type(adr_idx));
1511 Node *mem = memory(adr_idx);
1512 Node* st;
1513 if (require_atomic_access && bt == T_LONG) {
1514 st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val);
1515 } else {
1516 st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt);
1517 }
1518 st = _gvn.transform(st);
1519 set_memory(st, adr_idx);
1520   // Back-to-back stores can eliminate the intermediate store only once
1521   // DU info is available, so push the store on the worklist for the optimizer.
1522 if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1523 record_for_igvn(st);
1524
1525 return st;
1526 }
1527
1528
1529 void GraphKit::pre_barrier(bool do_load,
1530 Node* ctl,
1531 Node* obj,
1532 Node* adr,
1533 uint adr_idx,
1534 Node* val,
1535 const TypeOopPtr* val_type,
1536 Node* pre_val,
1596 write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
1597 break;
1598
1599 case BarrierSet::ModRef:
1600 break;
1601
1602 case BarrierSet::Other:
1603 default :
1604 ShouldNotReachHere();
1605
1606 }
1607 }
1608
1609 Node* GraphKit::store_oop(Node* ctl,
1610 Node* obj,
1611 Node* adr,
1612 const TypePtr* adr_type,
1613 Node* val,
1614 const TypeOopPtr* val_type,
1615 BasicType bt,
1616 bool use_precise) {
1617   // Transformation of a value which could be a NULL pointer (CastPP #NULL)
1618 // could be delayed during Parse (for example, in adjust_map_after_if()).
1619 // Execute transformation here to avoid barrier generation in such case.
1620 if (_gvn.type(val) == TypePtr::NULL_PTR)
1621 val = _gvn.makecon(TypePtr::NULL_PTR);
1622
1623 set_control(ctl);
1624 if (stopped()) return top(); // Dead path ?
1625
1626 assert(bt == T_OBJECT, "sanity");
1627 assert(val != NULL, "not dead path");
1628 uint adr_idx = C->get_alias_index(adr_type);
1629 assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1630
1631 pre_barrier(true /* do_load */,
1632 control(), obj, adr, adr_idx, val, val_type,
1633 NULL /* pre_val */,
1634 bt);
1635
1636 Node* store = store_to_memory(control(), adr, val, bt, adr_idx);
1637 post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
1638 return store;
1639 }
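// The bracketing above is the generic barrier pattern: pre_barrier() serves
// collectors that need the previous value (do_load == true makes it reload
// the field, as G1's pre-write barrier does), the store happens, and then
// post_barrier() performs any card marking; for BarrierSet::ModRef the
// post-barrier expands to nothing (see the switch above).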
1640
1641 // Could be an array or object we don't know at compile time (unsafe ref.)
1642 Node* GraphKit::store_oop_to_unknown(Node* ctl,
1643 Node* obj, // containing obj
1644                                       Node* adr,  // actual address to store val at
1645 const TypePtr* adr_type,
1646 Node* val,
1647 BasicType bt) {
1648 Compile::AliasType* at = C->alias_type(adr_type);
1649 const TypeOopPtr* val_type = NULL;
1650 if (adr_type->isa_instptr()) {
1651 if (at->field() != NULL) {
1652 // known field. This code is a copy of the do_put_xxx logic.
1653 ciField* field = at->field();
1654 if (!field->type()->is_loaded()) {
1655 val_type = TypeInstPtr::BOTTOM;
1656 } else {
1657 val_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
1658 }
1659 }
1660 } else if (adr_type->isa_aryptr()) {
1661 val_type = adr_type->is_aryptr()->elem()->make_oopptr();
1662 }
1663 if (val_type == NULL) {
1664 val_type = TypeInstPtr::BOTTOM;
1665 }
1666 return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true);
1667 }
1668
1669
1670 //-------------------------array_element_address-------------------------
1671 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1672 const TypeInt* sizetype) {
1673 uint shift = exact_log2(type2aelembytes(elembt));
1674 uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1675
1676 // short-circuit a common case (saves lots of confusing waste motion)
1677 jint idx_con = find_int_con(idx, -1);
1678 if (idx_con >= 0) {
1679 intptr_t offset = header + ((intptr_t)idx_con << shift);
1680 return basic_plus_adr(ary, offset);
1681 }
1682
1683 // must be correct type for alignment purposes
1684 Node* base = basic_plus_adr(ary, header);
1685 #ifdef _LP64
1686 // The scaled index operand to AddP must be a clean 64-bit value.
1690 // operand in pointer arithmetic has bad consequences.
1691 // On the other hand, 32-bit overflow is rare, and the possibility
1692 // can often be excluded, if we annotate the ConvI2L node with
1693 // a type assertion that its value is known to be a small positive
1694 // number. (The prior range check has ensured this.)
1695 // This assertion is used by ConvI2LNode::Ideal.
1696 int index_max = max_jint - 1; // array size is max_jint, index is one less
1697 if (sizetype != NULL) index_max = sizetype->_hi - 1;
1698 const TypeLong* lidxtype = TypeLong::make(CONST64(0), index_max, Type::WidenMax);
1699 idx = _gvn.transform( new (C) ConvI2LNode(idx, lidxtype) );
1700 #endif
1701 Node* scale = _gvn.transform( new (C) LShiftXNode(idx, intcon(shift)) );
1702 return basic_plus_adr(ary, base, scale);
1703 }
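// Worked example (sketch): for T_INT elements, shift == 2 and header ==
// arrayOopDesc::base_offset_in_bytes(T_INT), so a constant index 5
// short-circuits to basic_plus_adr(ary, header + (5 << 2)); on LP64 a
// variable index is first widened by ConvI2L carrying the [0, index_max]
// type assertion described above.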
1704
1705 //-------------------------load_array_element-------------------------
1706 Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {
1707 const Type* elemtype = arytype->elem();
1708 BasicType elembt = elemtype->array_element_basic_type();
1709 Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1710 Node* ld = make_load(ctl, adr, elemtype, elembt, arytype);
1711 return ld;
1712 }
1713
1714 //-------------------------set_arguments_for_java_call-------------------------
1715 // Arguments (pre-popped from the stack) are taken from the JVMS.
1716 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1717 // Add the call arguments:
1718 uint nargs = call->method()->arg_size();
1719 for (uint i = 0; i < nargs; i++) {
1720 Node* arg = argument(i);
1721 call->init_req(i + TypeFunc::Parms, arg);
1722 }
1723 }
1724
1725 //---------------------------set_edges_for_java_call---------------------------
1726 // Connect a newly created call into the current JVMS.
1727 // A return value node (if any) is returned from set_edges_for_java_call.
1728 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1729
1730 // Add the predefined inputs:
1925 }
1926 }
1927 while (wl.size() > 0) {
1928 _gvn.transform(wl.pop());
1929 }
1930 }
1931 }
1932
1933
1934 //------------------------------increment_counter------------------------------
1935 // for statistics: increment a VM counter by 1
1936
1937 void GraphKit::increment_counter(address counter_addr) {
1938 Node* adr1 = makecon(TypeRawPtr::make(counter_addr));
1939 increment_counter(adr1);
1940 }
1941
1942 void GraphKit::increment_counter(Node* counter_addr) {
1943 int adr_type = Compile::AliasIdxRaw;
1944 Node* ctrl = control();
1945 Node* cnt = make_load(ctrl, counter_addr, TypeInt::INT, T_INT, adr_type);
1946 Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(1)));
1947 store_to_memory( ctrl, counter_addr, incr, T_INT, adr_type );
1948 }
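// Note: the load/increment/store sequence above is not atomic, so concurrent
// increments can be lost. That is acceptable here because, as noted above,
// these counters exist only for statistics.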
1949
1950
1951 //------------------------------uncommon_trap----------------------------------
1952 // Bail out to the interpreter in mid-method. Implemented by calling the
1953 // uncommon_trap blob. This helper function inserts a runtime call with the
1954 // right debug info.
1955 void GraphKit::uncommon_trap(int trap_request,
1956 ciKlass* klass, const char* comment,
1957 bool must_throw,
1958 bool keep_exact_action) {
1959 if (failing()) stop();
1960 if (stopped()) return; // trap reachable?
1961
1962 // Note: If ProfileTraps is true, and if a deopt. actually
1963 // occurs here, the runtime will make sure an MDO exists. There is
1964 // no need to call method()->ensure_method_data() at this point.
1965
1966 // Set the stack pointer to the right value for reexecution:
1967 set_sp(reexecute_sp());
2508 // Just do a direct pointer compare and be done.
2509 Node* cmp = _gvn.transform( new(C) CmpPNode(subklass, superklass) );
2510 Node* bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::eq) );
2511 IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
2512 set_control( _gvn.transform( new(C) IfTrueNode (iff) ) );
2513 return _gvn.transform( new(C) IfFalseNode(iff) );
2514 }
2515 case SSC_full_test:
2516 break;
2517 default:
2518 ShouldNotReachHere();
2519 }
2520 }
2521
2522 // %%% Possible further optimization: Even if the superklass is not exact,
2523 // if the subklass is the unique subtype of the superklass, the check
2524 // will always succeed. We could leave a dependency behind to ensure this.
2525
2526 // First load the super-klass's check-offset
2527 Node *p1 = basic_plus_adr( superklass, superklass, in_bytes(Klass::super_check_offset_offset()) );
2528 Node *chk_off = _gvn.transform( new (C) LoadINode( NULL, memory(p1), p1, _gvn.type(p1)->is_ptr() ) );
2529 int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset());
2530 bool might_be_cache = (find_int_con(chk_off, cacheoff_con) == cacheoff_con);
2531
2532 // Load from the sub-klass's super-class display list, or a 1-word cache of
2533 // the secondary superclass list, or a failing value with a sentinel offset
2534 // if the super-klass is an interface or exceptionally deep in the Java
2535 // hierarchy and we have to scan the secondary superclass list the hard way.
2536 // Worst-case type is a little odd: NULL is allowed as a result (usually
2537 // klass loads can never produce a NULL).
2538 Node *chk_off_X = ConvI2X(chk_off);
2539 Node *p2 = _gvn.transform( new (C) AddPNode(subklass,subklass,chk_off_X) );
2540 // For some types like interfaces the following loadKlass is from a 1-word
2541 // cache which is mutable so can't use immutable memory. Other
2542 // types load from the super-class display table which is immutable.
2543 Node *kmem = might_be_cache ? memory(p2) : immutable_memory();
2544 Node *nkls = _gvn.transform( LoadKlassNode::make( _gvn, kmem, p2, _gvn.type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL ) );
2545
2546 // Compile speed common case: ARE a subtype and we canNOT fail
2547 if( superklass == nkls )
2548 return top(); // false path is dead; no test needed.
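// The fast path built so far is roughly (sketch):
//
//   if (*(Klass**)(subklass + superklass->super_check_offset()) == superklass)
//     // hit: primary supers display entry or secondary-super cache
//   else if (chk_off == in_bytes(Klass::secondary_super_cache_offset()))
//     // might_be_cache: must still scan the secondary supers list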
3221 // fetch the constant layout helper value into constant_value
3222 // and return (Node*)NULL. Otherwise, load the non-constant
3223 // layout helper value, and return the node which represents it.
3224 // This two-faced routine is useful because allocation sites
3225 // almost always feature constant types.
3226 Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
3227 const TypeKlassPtr* inst_klass = _gvn.type(klass_node)->isa_klassptr();
3228 if (!StressReflectiveCode && inst_klass != NULL) {
3229 ciKlass* klass = inst_klass->klass();
3230 bool xklass = inst_klass->klass_is_exact();
3231 if (xklass || klass->is_array_klass()) {
3232 jint lhelper = klass->layout_helper();
3233 if (lhelper != Klass::_lh_neutral_value) {
3234 constant_value = lhelper;
3235 return (Node*) NULL;
3236 }
3237 }
3238 }
3239 constant_value = Klass::_lh_neutral_value; // put in a known value
3240 Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
3241 return make_load(NULL, lhp, TypeInt::INT, T_INT);
3242 }
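// Expected caller pattern (hypothetical sketch; local names invented):
//
//   jint lh_con = 0;
//   Node* lh_node = get_layout_helper(klass_node, lh_con);
//   if (lh_node == NULL) {
//     // lh_con holds the constant layout helper
//   } else {
//     // emit runtime tests against lh_node
//   }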
3243
3244 // We just put in an allocate/initialize with a big raw-memory effect.
3245 // Hook selected additional alias categories on the initialization.
3246 static void hook_memory_on_init(GraphKit& kit, int alias_idx,
3247 MergeMemNode* init_in_merge,
3248 Node* init_out_raw) {
3249 DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
3250 assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");
3251
3252 Node* prevmem = kit.memory(alias_idx);
3253 init_in_merge->set_memory_at(alias_idx, prevmem);
3254 kit.set_memory(init_out_raw, alias_idx);
3255 }
3256
3257 //---------------------------set_output_for_allocation-------------------------
3258 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
3259 const TypeOopPtr* oop_type) {
3260 int rawidx = Compile::AliasIdxRaw;
3261 alloc->set_req( TypeFunc::FramePtr, frameptr() );
3756
3757 // Get the alias_index for raw card-mark memory
3758 int adr_type = Compile::AliasIdxRaw;
3759 Node* zero = __ ConI(0); // Dirty card value
3760 BasicType bt = T_BYTE;
3761
3762 if (UseCondCardMark) {
3763 // The classic GC reference write barrier is typically implemented
3764 // as a store into the global card mark table. Unfortunately
3765 // unconditional stores can result in false sharing and excessive
3766 // coherence traffic as well as false transactional aborts.
3767 // UseCondCardMark enables MP "polite" conditional card mark
3768 // stores. In theory we could relax the load from ctrl() to
3769 // no_ctrl, but that doesn't buy much latitude.
3770 Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, bt, adr_type);
3771 __ if_then(card_val, BoolTest::ne, zero);
3772 }
3773
3774 // Smash zero into card
3775 if( !UseConcMarkSweepGC ) {
3776 __ store(__ ctrl(), card_adr, zero, bt, adr_type);
3777 } else {
3778 // Specialized path for CM store barrier
3779 __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
3780 }
3781
3782 if (UseCondCardMark) {
3783 __ end_if();
3784 }
3785
3786 // Final sync IdealKit and GraphKit.
3787 final_sync(ideal);
3788 }
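// With UseCondCardMark the emitted logic is therefore roughly
//
//   if (*card_adr != 0) *card_adr = 0;   // 0 is the dirty value (see above)
//
// trading an extra load for the avoided write traffic described above.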
3789
3790 // G1 pre/post barriers
3791 void GraphKit::g1_write_barrier_pre(bool do_load,
3792 Node* obj,
3793 Node* adr,
3794 uint alias_idx,
3795 Node* val,
3796 const TypeOopPtr* val_type,
3853 Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
3854
3855 if (do_load) {
3856 // load original value
3857 // alias_idx correct??
3858 pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
3859 }
3860
3861 // if (pre_val != NULL)
3862 __ if_then(pre_val, BoolTest::ne, null()); {
3863 Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
3864
3865 // is the queue for this thread full?
3866 __ if_then(index, BoolTest::ne, zeroX, likely); {
3867
3868 // decrement the index
3869 Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
3870
3871 // Now get the buffer location we will log the previous value into and store it
3872 Node *log_addr = __ AddP(no_base, buffer, next_index);
3873 __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw);
3874 // update the index
3875 __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw);
3876
3877 } __ else_(); {
3878
3879 // logging buffer is full, call the runtime
3880 const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
3881 __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", pre_val, tls);
3882 } __ end_if(); // (!index)
3883 } __ end_if(); // (pre_val != NULL)
3884 } __ end_if(); // (!marking)
3885
3886 // Final sync IdealKit and GraphKit.
3887 final_sync(ideal);
3888 }
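// The enqueue logic above amounts to this pseudo-code (sketch of the
// SATB-style pre-barrier):
//
//   if (pre_val != NULL) {
//     if (index != 0) {                  // room left in the thread-local buffer
//       index -= sizeof(intptr_t);
//       buffer[index] = pre_val;         // log the previous value
//     } else {
//       g1_wb_pre(pre_val, thread);      // buffer full: call into the runtime
//     }
//   }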
3889
3890 //
3891 // Update the card table and add card address to the queue
3892 //
3893 void GraphKit::g1_mark_card(IdealKit& ideal,
3894 Node* card_adr,
3895 Node* oop_store,
3896 uint oop_alias_idx,
3897 Node* index,
3898 Node* index_adr,
3899 Node* buffer,
3900 const TypeFunc* tf) {
3901
3902 Node* zero = __ ConI(0);
3903 Node* zeroX = __ ConX(0);
3904 Node* no_base = __ top();
3905 BasicType card_bt = T_BYTE;
3906   // Smash zero into card. MUST BE ORDERED WRT THE OOP STORE
3907 __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);
3908
3909 // Now do the queue work
3910 __ if_then(index, BoolTest::ne, zeroX); {
3911
3912 Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
3913 Node* log_addr = __ AddP(no_base, buffer, next_index);
3914
3915 __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw);
3916 __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw);
3917
3918 } __ else_(); {
3919 __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
3920 } __ end_if();
3921
3922 }
3923
3924 void GraphKit::g1_write_barrier_post(Node* oop_store,
3925 Node* obj,
3926 Node* adr,
3927 uint alias_idx,
3928 Node* val,
3929 BasicType bt,
3930 bool use_precise) {
3931 // If we are writing a NULL then we need no post barrier
3932
3933 if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
3934 // Must be NULL
3935 const Type* t = val->bottom_type();
3936 assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
4026     // Object.clone() intrinsic uses this path.
4027 g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
4028 }
4029
4030 // Final sync IdealKit and GraphKit.
4031 final_sync(ideal);
4032 }
4033 #undef __
4034
4035
4036
4037 Node* GraphKit::load_String_offset(Node* ctrl, Node* str) {
4038 if (java_lang_String::has_offset_field()) {
4039 int offset_offset = java_lang_String::offset_offset_in_bytes();
4040 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4041 false, NULL, 0);
4042 const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
4043 int offset_field_idx = C->get_alias_index(offset_field_type);
4044 return make_load(ctrl,
4045 basic_plus_adr(str, str, offset_offset),
4046 TypeInt::INT, T_INT, offset_field_idx);
4047 } else {
4048 return intcon(0);
4049 }
4050 }
4051
4052 Node* GraphKit::load_String_length(Node* ctrl, Node* str) {
4053 if (java_lang_String::has_count_field()) {
4054 int count_offset = java_lang_String::count_offset_in_bytes();
4055 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4056 false, NULL, 0);
4057 const TypePtr* count_field_type = string_type->add_offset(count_offset);
4058 int count_field_idx = C->get_alias_index(count_field_type);
4059 return make_load(ctrl,
4060 basic_plus_adr(str, str, count_offset),
4061 TypeInt::INT, T_INT, count_field_idx);
4062 } else {
4063 return load_array_length(load_String_value(ctrl, str));
4064 }
4065 }
4066
4067 Node* GraphKit::load_String_value(Node* ctrl, Node* str) {
4068 int value_offset = java_lang_String::value_offset_in_bytes();
4069 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4070 false, NULL, 0);
4071 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4072 const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
4073 TypeAry::make(TypeInt::CHAR,TypeInt::POS),
4074 ciTypeArrayKlass::make(T_CHAR), true, 0);
4075 int value_field_idx = C->get_alias_index(value_field_type);
4076 Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset),
4077 value_type, T_OBJECT, value_field_idx);
4078 // String.value field is known to be @Stable.
4079 if (UseImplicitStableValues) {
4080 load = cast_array_to_stable(load, value_type);
4081 }
4082 return load;
4083 }
4084
4085 void GraphKit::store_String_offset(Node* ctrl, Node* str, Node* value) {
4086 int offset_offset = java_lang_String::offset_offset_in_bytes();
4087 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4088 false, NULL, 0);
4089 const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
4090 int offset_field_idx = C->get_alias_index(offset_field_type);
4091 store_to_memory(ctrl, basic_plus_adr(str, offset_offset),
4092 value, T_INT, offset_field_idx);
4093 }
4094
4095 void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) {
4096 int value_offset = java_lang_String::value_offset_in_bytes();
4097 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4098 false, NULL, 0);
4099 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4100
4101 store_oop_to_object(ctrl, str, basic_plus_adr(str, value_offset), value_field_type,
4102 value, TypeAryPtr::CHARS, T_OBJECT);
4103 }
4104
4105 void GraphKit::store_String_length(Node* ctrl, Node* str, Node* value) {
4106 int count_offset = java_lang_String::count_offset_in_bytes();
4107 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4108 false, NULL, 0);
4109 const TypePtr* count_field_type = string_type->add_offset(count_offset);
4110 int count_field_idx = C->get_alias_index(count_field_type);
4111 store_to_memory(ctrl, basic_plus_adr(str, count_offset),
4112 value, T_INT, count_field_idx);
4113 }
4114
4115 Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) {
4116 // Reify the property as a CastPP node in Ideal graph to comply with monotonicity
4117 // assumption of CCP analysis.
4118 return _gvn.transform(new(C) CastPPNode(ary, ary_type->cast_to_stable(true)));
4119 }
477 int bci = this->bci();
478 if (method != NULL && bci != InvocationEntryBci)
479 return method->java_code_at_bci(bci);
480 else
481 return Bytecodes::_illegal;
482 }
483
484 void GraphKit::uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
485 bool must_throw) {
486 // if the exception capability is set, then we will generate code
487 // to check the JavaThread.should_post_on_exceptions flag to see
488 // if we actually need to report exception events (for this
489 // thread). If we don't need to report exception events, we will
490 // take the normal fast path provided by add_exception_events. If
491 // exception event reporting is enabled for this thread, we will
492 // take the uncommon_trap in the BuildCutout below.
493
494 // first must access the should_post_on_exceptions_flag in this thread's JavaThread
495 Node* jthread = _gvn.transform(new (C) ThreadLocalNode());
496 Node* adr = basic_plus_adr(top(), jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset()));
497 Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, false, LoadNode::unordered);
498
499 // Test the should_post_on_exceptions_flag vs. 0
500 Node* chk = _gvn.transform( new (C) CmpINode(should_post_flag, intcon(0)) );
501 Node* tst = _gvn.transform( new (C) BoolNode(chk, BoolTest::eq) );
502
503 // Branch to slow_path if should_post_on_exceptions_flag was true
504 { BuildCutout unless(this, tst, PROB_MAX);
505 // Do not try anything fancy if we're notifying the VM on every throw.
506 // Cf. case Bytecodes::_athrow in parse2.cpp.
507 uncommon_trap(reason, Deoptimization::Action_none,
508 (ciKlass*)NULL, (char*)NULL, must_throw);
509 }
510
511 }
512
513 //------------------------------builtin_throw----------------------------------
514 void GraphKit::builtin_throw(Deoptimization::DeoptReason reason, Node* arg) {
515 bool must_throw = true;
516
517 if (env()->jvmti_can_post_on_exceptions()) {
579 }
580 break;
581 }
582 if (failing()) { stop(); return; } // exception allocation might fail
583 if (ex_obj != NULL) {
584 // Cheat with a preallocated exception object.
585 if (C->log() != NULL)
586 C->log()->elem("hot_throw preallocated='1' reason='%s'",
587 Deoptimization::trap_reason_name(reason));
588 const TypeInstPtr* ex_con = TypeInstPtr::make(ex_obj);
589 Node* ex_node = _gvn.transform( ConNode::make(C, ex_con) );
590
591 // Clear the detail message of the preallocated exception object.
592     // WebLogic sometimes mutates the detail message of exceptions
593 // using reflection.
594 int offset = java_lang_Throwable::get_detailMessage_offset();
595 const TypePtr* adr_typ = ex_con->add_offset(offset);
596
597 Node *adr = basic_plus_adr(ex_node, ex_node, offset);
598 const TypeOopPtr* val_type = TypeOopPtr::make_from_klass(env()->String_klass());
599     // Conservatively use release semantics for stores of object references.
600 Node *store = store_oop_to_object(control(), ex_node, adr, adr_typ, null(), val_type, T_OBJECT, StoreNode::release);
601
602 add_exception_state(make_exception_state(ex_node));
603 return;
604 }
605 }
606
607 // %%% Maybe add entry to OptoRuntime which directly throws the exc.?
608 // It won't be much cheaper than bailing to the interp., since we'll
609 // have to pass up all the debug-info, and the runtime will have to
610 // create the stack trace.
611
612 // Usual case: Bail to interpreter.
613 // Reserve the right to recompile if we haven't seen anything yet.
614
615 Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile;
616 if (treat_throw_as_hot
617 && (method()->method_data()->trap_recompiled_at(bci())
618 || C->too_many_traps(reason))) {
619 // We cannot afford to take more traps here. Suffer in the interpreter.
620 if (C->log() != NULL)
1467 map()->set_memory(mergemem);
1468 }
1469
1470 //------------------------------set_all_memory_call----------------------------
1471 void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
1472 Node* newmem = _gvn.transform( new (C) ProjNode(call, TypeFunc::Memory, separate_io_proj) );
1473 set_all_memory(newmem);
1474 }
1475
1476 //=============================================================================
1477 //
1478 // parser factory methods for MemNodes
1479 //
1480 // These are layered on top of the factory methods in LoadNode and StoreNode,
1481 // and integrate with the parser's memory state and _gvn engine.
1482 //
1483
1484 // factory methods keyed by alias index ("int adr_idx")
1485 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
1486 int adr_idx,
1487 bool require_atomic_access, LoadNode::Sem sem) {
1488 assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1489 const TypePtr* adr_type = NULL; // debug-mode-only argument
1490 debug_only(adr_type = C->get_adr_type(adr_idx));
1491 Node* mem = memory(adr_idx);
1492 Node* ld;
1493 if (require_atomic_access && bt == T_LONG) {
1494 ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t, sem);
1495 } else {
1496 ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, sem);
1497 }
1498 ld = _gvn.transform(ld);
1499   if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1500 // Improve graph before escape analysis and boxing elimination.
1501 record_for_igvn(ld);
1502 }
1503 return ld;
1504 }
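// Note the extra trailing argument: loads now carry an explicit ordering
// (LoadNode::Sem) through to the node factories. Plain parser loads pass
// LoadNode::unordered (see the callers below); presumably ordered variants
// such as acquire are requested where the platform memory model demands it.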
1505
1506 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1507 int adr_idx,
1508 bool require_atomic_access,
1509 StoreNode::Sem sem) {
1510 assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1511 const TypePtr* adr_type = NULL;
1512 debug_only(adr_type = C->get_adr_type(adr_idx));
1513 Node *mem = memory(adr_idx);
1514 Node* st;
1515 if (require_atomic_access && bt == T_LONG) {
1516 st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val, sem);
1517 } else {
1518 st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, sem);
1519 }
1520 st = _gvn.transform(st);
1521 set_memory(st, adr_idx);
1522   // Back-to-back stores can eliminate the intermediate store only once
1523   // DU info is available, so push the store on the worklist for the optimizer.
1524 if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1525 record_for_igvn(st);
1526
1527 return st;
1528 }
1529
1530
1531 void GraphKit::pre_barrier(bool do_load,
1532 Node* ctl,
1533 Node* obj,
1534 Node* adr,
1535 uint adr_idx,
1536 Node* val,
1537 const TypeOopPtr* val_type,
1538 Node* pre_val,
1598 write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
1599 break;
1600
1601 case BarrierSet::ModRef:
1602 break;
1603
1604 case BarrierSet::Other:
1605 default :
1606 ShouldNotReachHere();
1607
1608 }
1609 }
1610
1611 Node* GraphKit::store_oop(Node* ctl,
1612 Node* obj,
1613 Node* adr,
1614 const TypePtr* adr_type,
1615 Node* val,
1616 const TypeOopPtr* val_type,
1617 BasicType bt,
1618 bool use_precise,
1619 StoreNode::Sem sem) {
1620   // Transformation of a value which could be a NULL pointer (CastPP #NULL)
1621 // could be delayed during Parse (for example, in adjust_map_after_if()).
1622 // Execute transformation here to avoid barrier generation in such case.
1623 if (_gvn.type(val) == TypePtr::NULL_PTR)
1624 val = _gvn.makecon(TypePtr::NULL_PTR);
1625
1626 set_control(ctl);
1627 if (stopped()) return top(); // Dead path ?
1628
1629 assert(bt == T_OBJECT, "sanity");
1630 assert(val != NULL, "not dead path");
1631 uint adr_idx = C->get_alias_index(adr_type);
1632 assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1633
1634 pre_barrier(true /* do_load */,
1635 control(), obj, adr, adr_idx, val, val_type,
1636 NULL /* pre_val */,
1637 bt);
1638
1639 Node* store = store_to_memory(control(), adr, val, bt, adr_idx, false, sem);
1640 post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
1641 return store;
1642 }
1643
1644 // Could be an array or object we don't know at compile time (unsafe ref.)
1645 Node* GraphKit::store_oop_to_unknown(Node* ctl,
1646 Node* obj, // containing obj
1647                                       Node* adr,  // actual address to store val at
1648 const TypePtr* adr_type,
1649 Node* val,
1650 BasicType bt,
1651 StoreNode::Sem sem) {
1652 Compile::AliasType* at = C->alias_type(adr_type);
1653 const TypeOopPtr* val_type = NULL;
1654 if (adr_type->isa_instptr()) {
1655 if (at->field() != NULL) {
1656 // known field. This code is a copy of the do_put_xxx logic.
1657 ciField* field = at->field();
1658 if (!field->type()->is_loaded()) {
1659 val_type = TypeInstPtr::BOTTOM;
1660 } else {
1661 val_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
1662 }
1663 }
1664 } else if (adr_type->isa_aryptr()) {
1665 val_type = adr_type->is_aryptr()->elem()->make_oopptr();
1666 }
1667 if (val_type == NULL) {
1668 val_type = TypeInstPtr::BOTTOM;
1669 }
1670 return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, sem);
1671 }
1672
1673
1674 //-------------------------array_element_address-------------------------
1675 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1676 const TypeInt* sizetype) {
1677 uint shift = exact_log2(type2aelembytes(elembt));
1678 uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1679
1680 // short-circuit a common case (saves lots of confusing waste motion)
1681 jint idx_con = find_int_con(idx, -1);
1682 if (idx_con >= 0) {
1683 intptr_t offset = header + ((intptr_t)idx_con << shift);
1684 return basic_plus_adr(ary, offset);
1685 }
1686
1687 // must be correct type for alignment purposes
1688 Node* base = basic_plus_adr(ary, header);
1689 #ifdef _LP64
1690 // The scaled index operand to AddP must be a clean 64-bit value.
1694 // operand in pointer arithmetic has bad consequences.
1695 // On the other hand, 32-bit overflow is rare, and the possibility
1696 // can often be excluded, if we annotate the ConvI2L node with
1697 // a type assertion that its value is known to be a small positive
1698 // number. (The prior range check has ensured this.)
1699 // This assertion is used by ConvI2LNode::Ideal.
1700 int index_max = max_jint - 1; // array size is max_jint, index is one less
1701 if (sizetype != NULL) index_max = sizetype->_hi - 1;
1702 const TypeLong* lidxtype = TypeLong::make(CONST64(0), index_max, Type::WidenMax);
1703 idx = _gvn.transform( new (C) ConvI2LNode(idx, lidxtype) );
1704 #endif
1705 Node* scale = _gvn.transform( new (C) LShiftXNode(idx, intcon(shift)) );
1706 return basic_plus_adr(ary, base, scale);
1707 }
1708
1709 //-------------------------load_array_element-------------------------
1710 Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {
1711 const Type* elemtype = arytype->elem();
1712 BasicType elembt = elemtype->array_element_basic_type();
1713 Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1714 Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, false, LoadNode::unordered);
1715 return ld;
1716 }
1717
1718 //-------------------------set_arguments_for_java_call-------------------------
1719 // Arguments (pre-popped from the stack) are taken from the JVMS.
1720 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1721 // Add the call arguments:
1722 uint nargs = call->method()->arg_size();
1723 for (uint i = 0; i < nargs; i++) {
1724 Node* arg = argument(i);
1725 call->init_req(i + TypeFunc::Parms, arg);
1726 }
1727 }
1728
1729 //---------------------------set_edges_for_java_call---------------------------
1730 // Connect a newly created call into the current JVMS.
1731 // A return value node (if any) is returned from set_edges_for_java_call.
1732 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1733
1734 // Add the predefined inputs:
1929 }
1930 }
1931 while (wl.size() > 0) {
1932 _gvn.transform(wl.pop());
1933 }
1934 }
1935 }
1936
1937
1938 //------------------------------increment_counter------------------------------
1939 // for statistics: increment a VM counter by 1
1940
1941 void GraphKit::increment_counter(address counter_addr) {
1942 Node* adr1 = makecon(TypeRawPtr::make(counter_addr));
1943 increment_counter(adr1);
1944 }
1945
1946 void GraphKit::increment_counter(Node* counter_addr) {
1947 int adr_type = Compile::AliasIdxRaw;
1948 Node* ctrl = control();
1949 Node* cnt = make_load(ctrl, counter_addr, TypeInt::INT, T_INT, adr_type, false, LoadNode::unordered);
1950 Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(1)));
1951 store_to_memory(ctrl, counter_addr, incr, T_INT, adr_type, false, StoreNode::unordered);
1952 }
1953
1954
1955 //------------------------------uncommon_trap----------------------------------
1956 // Bail out to the interpreter in mid-method. Implemented by calling the
1957 // uncommon_trap blob. This helper function inserts a runtime call with the
1958 // right debug info.
1959 void GraphKit::uncommon_trap(int trap_request,
1960 ciKlass* klass, const char* comment,
1961 bool must_throw,
1962 bool keep_exact_action) {
1963 if (failing()) stop();
1964 if (stopped()) return; // trap reachable?
1965
1966 // Note: If ProfileTraps is true, and if a deopt. actually
1967 // occurs here, the runtime will make sure an MDO exists. There is
1968 // no need to call method()->ensure_method_data() at this point.
1969
1970 // Set the stack pointer to the right value for reexecution:
1971 set_sp(reexecute_sp());
2512 // Just do a direct pointer compare and be done.
2513 Node* cmp = _gvn.transform( new(C) CmpPNode(subklass, superklass) );
2514 Node* bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::eq) );
2515 IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
2516 set_control( _gvn.transform( new(C) IfTrueNode (iff) ) );
2517 return _gvn.transform( new(C) IfFalseNode(iff) );
2518 }
2519 case SSC_full_test:
2520 break;
2521 default:
2522 ShouldNotReachHere();
2523 }
2524 }
2525
2526 // %%% Possible further optimization: Even if the superklass is not exact,
2527 // if the subklass is the unique subtype of the superklass, the check
2528 // will always succeed. We could leave a dependency behind to ensure this.
2529
2530 // First load the super-klass's check-offset
2531 Node *p1 = basic_plus_adr( superklass, superklass, in_bytes(Klass::super_check_offset_offset()) );
2532 Node *chk_off = _gvn.transform(new (C) LoadINode(NULL, memory(p1), p1, _gvn.type(p1)->is_ptr(),
2533 TypeInt::INT, LoadNode::unordered));
2534 int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset());
2535 bool might_be_cache = (find_int_con(chk_off, cacheoff_con) == cacheoff_con);
2536
2537 // Load from the sub-klass's super-class display list, or a 1-word cache of
2538 // the secondary superclass list, or a failing value with a sentinel offset
2539 // if the super-klass is an interface or exceptionally deep in the Java
2540 // hierarchy and we have to scan the secondary superclass list the hard way.
2541 // Worst-case type is a little odd: NULL is allowed as a result (usually
2542 // klass loads can never produce a NULL).
2543 Node *chk_off_X = ConvI2X(chk_off);
2544 Node *p2 = _gvn.transform( new (C) AddPNode(subklass,subklass,chk_off_X) );
2545 // For some types like interfaces the following loadKlass is from a 1-word
2546 // cache which is mutable so can't use immutable memory. Other
2547 // types load from the super-class display table which is immutable.
2548 Node *kmem = might_be_cache ? memory(p2) : immutable_memory();
2549 Node *nkls = _gvn.transform( LoadKlassNode::make( _gvn, kmem, p2, _gvn.type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL ) );
2550
2551 // Compile speed common case: ARE a subtype and we canNOT fail
2552 if( superklass == nkls )
2553 return top(); // false path is dead; no test needed.
3226 // fetch the constant layout helper value into constant_value
3227 // and return (Node*)NULL. Otherwise, load the non-constant
3228 // layout helper value, and return the node which represents it.
3229 // This two-faced routine is useful because allocation sites
3230 // almost always feature constant types.
3231 Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
3232 const TypeKlassPtr* inst_klass = _gvn.type(klass_node)->isa_klassptr();
3233 if (!StressReflectiveCode && inst_klass != NULL) {
3234 ciKlass* klass = inst_klass->klass();
3235 bool xklass = inst_klass->klass_is_exact();
3236 if (xklass || klass->is_array_klass()) {
3237 jint lhelper = klass->layout_helper();
3238 if (lhelper != Klass::_lh_neutral_value) {
3239 constant_value = lhelper;
3240 return (Node*) NULL;
3241 }
3242 }
3243 }
3244 constant_value = Klass::_lh_neutral_value; // put in a known value
3245 Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
3246 return make_load(NULL, lhp, TypeInt::INT, T_INT, false, LoadNode::unordered);
3247 }
3248
3249 // We just put in an allocate/initialize with a big raw-memory effect.
3250 // Hook selected additional alias categories on the initialization.
3251 static void hook_memory_on_init(GraphKit& kit, int alias_idx,
3252 MergeMemNode* init_in_merge,
3253 Node* init_out_raw) {
3254 DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
3255 assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");
3256
3257 Node* prevmem = kit.memory(alias_idx);
3258 init_in_merge->set_memory_at(alias_idx, prevmem);
3259 kit.set_memory(init_out_raw, alias_idx);
3260 }
3261
3262 //---------------------------set_output_for_allocation-------------------------
3263 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
3264 const TypeOopPtr* oop_type) {
3265 int rawidx = Compile::AliasIdxRaw;
3266 alloc->set_req( TypeFunc::FramePtr, frameptr() );
3761
3762 // Get the alias_index for raw card-mark memory
3763 int adr_type = Compile::AliasIdxRaw;
3764 Node* zero = __ ConI(0); // Dirty card value
3765 BasicType bt = T_BYTE;
3766
3767 if (UseCondCardMark) {
3768 // The classic GC reference write barrier is typically implemented
3769 // as a store into the global card mark table. Unfortunately
3770 // unconditional stores can result in false sharing and excessive
3771 // coherence traffic as well as false transactional aborts.
3772 // UseCondCardMark enables MP "polite" conditional card mark
3773 // stores. In theory we could relax the load from ctrl() to
3774 // no_ctrl, but that doesn't buy much latitude.
3775 Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, bt, adr_type);
3776 __ if_then(card_val, BoolTest::ne, zero);
3777 }
3778
3779 // Smash zero into card
3780 if( !UseConcMarkSweepGC ) {
3781 __ store(__ ctrl(), card_adr, zero, bt, adr_type, false, StoreNode::release);
3782 } else {
3783 // Specialized path for CM store barrier
3784 __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
3785 }
3786
3787 if (UseCondCardMark) {
3788 __ end_if();
3789 }
3790
3791 // Final sync IdealKit and GraphKit.
3792 final_sync(ideal);
3793 }
3794
3795 // G1 pre/post barriers
3796 void GraphKit::g1_write_barrier_pre(bool do_load,
3797 Node* obj,
3798 Node* adr,
3799 uint alias_idx,
3800 Node* val,
3801 const TypeOopPtr* val_type,
3858 Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
3859
3860 if (do_load) {
3861 // load original value
3862 // alias_idx correct??
3863 pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
3864 }
3865
3866 // if (pre_val != NULL)
3867 __ if_then(pre_val, BoolTest::ne, null()); {
3868 Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
3869
3870 // is the queue for this thread full?
3871 __ if_then(index, BoolTest::ne, zeroX, likely); {
3872
3873 // decrement the index
3874 Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
3875
3876 // Now get the buffer location we will log the previous value into and store it
3877 Node *log_addr = __ AddP(no_base, buffer, next_index);
3878 __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, false, StoreNode::unordered);
3879 // update the index
3880 __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, false, StoreNode::unordered);
3881
3882 } __ else_(); {
3883
3884 // logging buffer is full, call the runtime
3885 const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
3886 __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", pre_val, tls);
3887 } __ end_if(); // (!index)
3888 } __ end_if(); // (pre_val != NULL)
3889 } __ end_if(); // (!marking)
3890
3891 // Final sync IdealKit and GraphKit.
3892 final_sync(ideal);
3893 }
3894
3895 //
3896 // Update the card table and add card address to the queue
3897 //
3898 void GraphKit::g1_mark_card(IdealKit& ideal,
3899 Node* card_adr,
3900 Node* oop_store,
3901 uint oop_alias_idx,
3902 Node* index,
3903 Node* index_adr,
3904 Node* buffer,
3905 const TypeFunc* tf) {
3906
3907 Node* zero = __ ConI(0);
3908 Node* zeroX = __ ConX(0);
3909 Node* no_base = __ top();
3910 BasicType card_bt = T_BYTE;
3911   // Smash zero into card. MUST BE ORDERED WRT THE OOP STORE
3912 __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);
3913
3914 // Now do the queue work
3915 __ if_then(index, BoolTest::ne, zeroX); {
3916
3917 Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
3918 Node* log_addr = __ AddP(no_base, buffer, next_index);
3919
3920     // For the required ordering, see storeCM above.
3921 __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw, false, StoreNode::unordered);
3922 __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, false, StoreNode::unordered);
3923
3924 } __ else_(); {
3925 __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
3926 } __ end_if();
3927
3928 }
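// Pseudo-code for the card enqueue above (sketch):
//
//   *card_adr = 0;                       // storeCM: kept ordered after the oop store
//   if (index != 0) {
//     index -= sizeof(intptr_t);
//     buffer[index] = card_adr;          // log the card address
//   } else {
//     g1_wb_post(card_adr, thread);      // buffer full: call into the runtime
//   }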
3929
3930 void GraphKit::g1_write_barrier_post(Node* oop_store,
3931 Node* obj,
3932 Node* adr,
3933 uint alias_idx,
3934 Node* val,
3935 BasicType bt,
3936 bool use_precise) {
3937 // If we are writing a NULL then we need no post barrier
3938
3939 if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
3940 // Must be NULL
3941 const Type* t = val->bottom_type();
3942 assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
4032     // Object.clone() intrinsic uses this path.
4033 g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
4034 }
4035
4036 // Final sync IdealKit and GraphKit.
4037 final_sync(ideal);
4038 }
4039 #undef __
4040
4041
4042
4043 Node* GraphKit::load_String_offset(Node* ctrl, Node* str) {
4044 if (java_lang_String::has_offset_field()) {
4045 int offset_offset = java_lang_String::offset_offset_in_bytes();
4046 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4047 false, NULL, 0);
4048 const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
4049 int offset_field_idx = C->get_alias_index(offset_field_type);
4050 return make_load(ctrl,
4051 basic_plus_adr(str, str, offset_offset),
4052 TypeInt::INT, T_INT, offset_field_idx, false, LoadNode::unordered);
4053 } else {
4054 return intcon(0);
4055 }
4056 }
4057
4058 Node* GraphKit::load_String_length(Node* ctrl, Node* str) {
4059 if (java_lang_String::has_count_field()) {
4060 int count_offset = java_lang_String::count_offset_in_bytes();
4061 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4062 false, NULL, 0);
4063 const TypePtr* count_field_type = string_type->add_offset(count_offset);
4064 int count_field_idx = C->get_alias_index(count_field_type);
4065 return make_load(ctrl,
4066 basic_plus_adr(str, str, count_offset),
4067 TypeInt::INT, T_INT, count_field_idx, false, LoadNode::unordered);
4068 } else {
4069 return load_array_length(load_String_value(ctrl, str));
4070 }
4071 }
4072
4073 Node* GraphKit::load_String_value(Node* ctrl, Node* str) {
4074 int value_offset = java_lang_String::value_offset_in_bytes();
4075 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4076 false, NULL, 0);
4077 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4078 const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
4079 TypeAry::make(TypeInt::CHAR,TypeInt::POS),
4080 ciTypeArrayKlass::make(T_CHAR), true, 0);
4081 int value_field_idx = C->get_alias_index(value_field_type);
4082 Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset),
4083 value_type, T_OBJECT, value_field_idx, false, LoadNode::unordered);
4084 // String.value field is known to be @Stable.
4085 if (UseImplicitStableValues) {
4086 load = cast_array_to_stable(load, value_type);
4087 }
4088 return load;
4089 }
4090
4091 void GraphKit::store_String_offset(Node* ctrl, Node* str, Node* value) {
4092 int offset_offset = java_lang_String::offset_offset_in_bytes();
4093 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4094 false, NULL, 0);
4095 const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
4096 int offset_field_idx = C->get_alias_index(offset_field_type);
4097 store_to_memory(ctrl, basic_plus_adr(str, offset_offset),
4098 value, T_INT, offset_field_idx, false, StoreNode::unordered);
4099 }
4100
4101 void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) {
4102 int value_offset = java_lang_String::value_offset_in_bytes();
4103 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4104 false, NULL, 0);
4105 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4106
4107 store_oop_to_object(ctrl, str, basic_plus_adr(str, value_offset), value_field_type,
4108 value, TypeAryPtr::CHARS, T_OBJECT, false, StoreNode::unordered);
4109 }
4110
4111 void GraphKit::store_String_length(Node* ctrl, Node* str, Node* value) {
4112 int count_offset = java_lang_String::count_offset_in_bytes();
4113 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4114 false, NULL, 0);
4115 const TypePtr* count_field_type = string_type->add_offset(count_offset);
4116 int count_field_idx = C->get_alias_index(count_field_type);
4117 store_to_memory(ctrl, basic_plus_adr(str, count_offset),
4118 value, T_INT, count_field_idx, false, StoreNode::unordered);
4119 }
4120
4121 Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) {
4122 // Reify the property as a CastPP node in Ideal graph to comply with monotonicity
4123 // assumption of CCP analysis.
4124 return _gvn.transform(new(C) CastPPNode(ary, ary_type->cast_to_stable(true)));
4125 }