/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciValueKlass.hpp"
#include "classfile/systemDictionary.hpp"
#include "compiler/compileLog.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/valueArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/valuetypenode.hpp"
#include "runtime/sharedRuntime.hpp"

//------------------------------make_dtrace_method_entry_exit ----------------
// Dtrace -- record entry or exit of a method if compiled with dtrace support
void GraphKit::make_dtrace_method_entry_exit(ciMethod* method, bool is_entry) {
  const TypeFunc *call_type    = OptoRuntime::dtrace_method_entry_exit_Type();
  address         call_address = is_entry ? CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry) :
                                            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit);
  const char     *call_name    = is_entry ? "dtrace_method_entry" : "dtrace_method_exit";
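
  // The runtime call emitted below is a leaf call of the form (sketch):
  //   SharedRuntime::dtrace_method_entry(current_thread, method)  // or dtrace_method_exit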

  // Get base of thread-local storage area
  Node* thread = _gvn.transform( new ThreadLocalNode() );

  // Get method
  const TypePtr* method_type = TypeMetadataPtr::make(method);
  Node *method_node = _gvn.transform(ConNode::make(method_type));

  kill_dead_locals();

  // For some reason, this call reads only raw memory.
  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
  make_runtime_call(RC_LEAF | RC_NARROW_MEM,
                    call_type, call_address,
                    call_name, raw_adr_type,
                    thread, method_node);
}


//=============================================================================
//------------------------------do_checkcast-----------------------------------
void Parse::do_checkcast() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  Node *obj = peek();

  // Throw uncommon trap if class is not loaded or the value we are casting
  // _from_ is not loaded, and value is not null.  If the value _is_ NULL,
  // then the checkcast does nothing.
  const TypeOopPtr *tp = _gvn.type(obj)->isa_oopptr();
  if (!will_link || (tp && tp->klass() && !tp->klass()->is_loaded())) {
    if (C->log() != NULL) {
      if (!will_link) {
        C->log()->elem("assert_null reason='checkcast' klass='%d'",
                       C->log()->identify(klass));
      }
      if (tp && tp->klass() && !tp->klass()->is_loaded()) {
        // %%% Cannot happen?
        C->log()->elem("assert_null reason='checkcast source' klass='%d'",
                       C->log()->identify(tp->klass()));
      }
    }
    null_assert(obj);
    assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
    if (!stopped()) {
      profile_null_checkcast();
    }
    return;
  }

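  // gen_checkcast implements the bytecode semantics (sketch):
  //   if (obj != NULL && !klass->is_instance(obj))  throw ClassCastException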
  Node *res = gen_checkcast(obj, makecon(TypeKlassPtr::make(klass)) );
  if (stopped()) {
    return;
  }

  // Pop from stack AFTER gen_checkcast because it can uncommon trap and
  // the debug info has to be correct.
  pop();
  push(res);
}


//------------------------------do_instanceof----------------------------------
void Parse::do_instanceof() {
  if (stopped())  return;
  // We would like to return false if class is not loaded, emitting a
  // dependency, but Java requires instanceof to load its operand.

  // Throw uncommon trap if class is not loaded
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  if (!will_link) {
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='instanceof' klass='%d'",
                     C->log()->identify(klass));
    }
    null_assert(peek());
    assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
    if (!stopped()) {
      // The object is now known to be null.
      // Shortcut the effect of gen_instanceof and return "false" directly.
      pop();                   // pop the null
      push(_gvn.intcon(0));    // push false answer
    }
    return;
  }

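  // gen_instanceof computes (sketch):
  //   (obj != NULL && klass->is_instance(obj)) ? 1 : 0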
  // Push the bool result back on stack
  Node* res = gen_instanceof(peek(), makecon(TypeKlassPtr::make(klass)), true);

  // Pop from stack AFTER gen_instanceof because it can uncommon trap.
  pop();
  push(res);
}

//------------------------------array_store_check------------------------------
// Peek at the value, index and array on the stack and check that the store is valid
Node* Parse::array_store_check() {
  // Shorthand access to array store elements without popping them.
  Node *obj = peek(0);
  Node *idx = peek(1);
  Node *ary = peek(2);

  const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
  const Type* elemtype = ary_t->elem();
  const TypeOopPtr* elemptr = elemtype->make_oopptr();
  bool is_value_array = elemtype->isa_valuetype() != NULL || (elemptr != NULL && elemptr->is_valuetypeptr());

  if (_gvn.type(obj) == TypePtr::NULL_PTR) {
    // There's never a type check on null values.
    // This cutout lets us avoid the uncommon_trap(Reason_array_check)
    // below, which turns into a performance liability if the
    // gen_checkcast folds up completely.
    return obj;
  }

  // Extract the array klass type
  Node* array_klass = load_object_klass(ary);
  // Get the array klass
  const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr();

  // The type of array_klass is usually INexact array-of-oop.  Heroically
  // cast array_klass to EXACT array and uncommon-trap if the cast fails.
  // Make constant out of the inexact array klass, but use it only if the cast
  // succeeds.
  bool always_see_exact_class = false;
  if (MonomorphicArrayCheck
      && !too_many_traps(Deoptimization::Reason_array_check)
      && !tak->klass_is_exact()
      && tak != TypeKlassPtr::OBJECT) {
      // Regarding the fourth condition in the if-statement from above:
      //
      // If the compiler has determined that the type of array 'ary' (represented
      // by 'array_klass') is java/lang/Object, the compiler must not assume that
      // the array 'ary' is monomorphic.
      //
      // If 'ary' were of type java/lang/Object, this arraystore would have to fail,
      // because it is not possible to perform an arraystore into an object that is not
      // a "proper" array.
      //
      // Therefore, let's obtain at runtime the type of 'ary' and check if we can still
      // successfully perform the store.
      //
      // The implementation reasons for the condition are the following:
      //
      // java/lang/Object is the superclass of all arrays, but it is represented by the VM
      // as an InstanceKlass. The checks generated by gen_checkcast() (see below) expect
      // 'array_klass' to be ObjArrayKlass, which can result in invalid memory accesses.
      //
      // See issue JDK-8057622 for details.

    always_see_exact_class = true;
    // (If no MDO at all, hope for the best, until a trap actually occurs.)

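    // The cutout below emits (sketch):
    //   if (array_klass != exact_con)
    //     uncommon_trap(Reason_array_check, Action_maybe_recompile)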
    // Make a constant out of the inexact array klass
    const TypeKlassPtr *extak = tak->cast_to_exactness(true)->is_klassptr();
    Node* con = makecon(extak);
    Node* cmp = _gvn.transform(new CmpPNode( array_klass, con ));
    Node* bol = _gvn.transform(new BoolNode( cmp, BoolTest::eq ));
    Node* ctrl= control();
    { BuildCutout unless(this, bol, PROB_MAX);
      uncommon_trap(Deoptimization::Reason_array_check,
                    Deoptimization::Action_maybe_recompile,
                    tak->klass());
    }
    if (stopped()) {          // MUST uncommon-trap?
      set_control(ctrl);      // Then Don't Do It, just fall into the normal checking
    } else {                  // Cast array klass to exactness:
      // Use the exact constant value we know it is.
      replace_in_map(array_klass,con);
      CompileLog* log = C->log();
      if (log != NULL) {
        log->elem("cast_up reason='monomorphic_array' from='%d' to='(exact)'",
                  log->identify(tak->klass()));
      }
      array_klass = con;      // Use cast value moving forward
    }
  }

  // Come here for polymorphic array klasses

  // Extract the array element class
  int element_klass_offset = in_bytes(ArrayKlass::element_klass_offset());

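  // Address of the ArrayKlass::_element_klass field embedded in the array klass.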
  Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset);
  // We are allowed to use the constant type only if cast succeeded. If always_see_exact_class is true,
  // we must set a control edge from the IfTrue node created by the uncommon_trap above to the
  // LoadKlassNode.
  Node* a_e_klass = _gvn.transform(LoadKlassNode::make(_gvn, always_see_exact_class ? control() : NULL,
                                                       immutable_memory(), p2, tak));

  // Handle value type arrays
  if (is_value_array) {
    // We statically know that this is a value type array, use precise klass ptr
    ciValueKlass* vk = elemtype->isa_valuetype() ? elemtype->is_valuetype()->value_klass() :
                                                   elemptr->value_klass();
    a_e_klass = makecon(TypeKlassPtr::make(vk));
  }

  // Check (the hard way) and throw if not a subklass.
  return gen_checkcast(obj, a_e_klass);
}


void Parse::emit_guard_for_new(ciInstanceKlass* klass) {
  if ((!klass->is_initialized() && !klass->is_being_initialized()) ||
      klass->is_abstract() || klass->is_interface() ||
      klass->name() == ciSymbol::java_lang_Class() ||
      iter().is_unresolved_klass()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  klass);
  }
  if (klass->is_being_initialized()) {
    // Emit guarded new
    //   if (klass->_init_thread != current_thread ||
    //       klass->_init_state != being_initialized)
    //      uncommon_trap
    Node* cur_thread = _gvn.transform( new ThreadLocalNode() );
    Node* merge = new RegionNode(3);
    _gvn.set_type(merge, Type::CONTROL);
    Node* kls = makecon(TypeKlassPtr::make(klass));

    Node* init_thread_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_thread_offset()));
    Node* adr_node = basic_plus_adr(kls, kls, init_thread_offset);
    Node* init_thread = make_load(NULL, adr_node, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);
    Node *tst   = Bool( CmpP( init_thread, cur_thread), BoolTest::eq);
    IfNode* iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
    set_control(IfTrue(iff));
    merge->set_req(1, IfFalse(iff));

    Node* init_state_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_state_offset()));
    adr_node = basic_plus_adr(kls, kls, init_state_offset);
    // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
    // can generate code to load it as unsigned byte.
    Node* init_state = make_load(NULL, adr_node, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
    Node* being_init = _gvn.intcon(InstanceKlass::being_initialized);
    tst   = Bool( CmpI( init_state, being_init), BoolTest::eq);
    iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
    set_control(IfTrue(iff));
    merge->set_req(2, IfFalse(iff));
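
    // Control continues here on the fast path where both tests passed
    // (init_thread == current_thread && init_state == being_initialized).
    // The failing paths meet in 'merge' and lead to the uncommon trap below;
    // PreserveJVMState restores the fast-path state once the trap is emitted.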

    PreserveJVMState pjvms(this);
    record_for_igvn(merge);
    set_control(merge);

    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  klass);
  }
}


//------------------------------do_new-----------------------------------------
void Parse::do_new() {
  kill_dead_locals();

  bool will_link;
  ciInstanceKlass* klass = iter().get_klass(will_link)->as_instance_klass();
  assert(will_link, "_new: typeflow responsibility");

  // Should initialize, or throw an InstantiationError?
  emit_guard_for_new(klass);
  if (stopped()) return;

  Node* kls = makecon(TypeKlassPtr::make(klass));
  Node* obj = new_instance(kls);

  // Push resultant oop onto stack
  push(obj);

  // Keep track of whether opportunities exist for StringBuilder
  // optimizations.
  if (OptimizeStringConcat &&
      (klass == C->env()->StringBuilder_klass() ||
       klass == C->env()->StringBuffer_klass())) {
    C->set_has_stringbuilder(true);
  }

  // Keep track of boxed values for EliminateAutoBox optimizations.
  if (C->eliminate_boxing() && klass->is_box_klass()) {
    C->set_has_boxed_value(true);
  }
}

//------------------------------do_defaultvalue---------------------------------
void Parse::do_defaultvalue() {
  bool will_link;
  ciValueKlass* vk = iter().get_klass(will_link)->as_value_klass();
  assert(will_link, "defaultvalue: typeflow responsibility");

  // Should initialize, or throw an InstantiationError?
  emit_guard_for_new(vk);
  if (stopped()) return;

  // Create and push a new default ValueTypeNode
  push(ValueTypeNode::make_default(_gvn, vk));
}

//------------------------------do_withfield------------------------------------
void Parse::do_withfield() {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "withfield: typeflow responsibility");
  BasicType bt = field->layout_type();
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();
  Node* vt = pop();
  assert(vt->is_ValueType(), "value type expected here");

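  // Clone the incoming value type node and clear its oop: the modified copy
  // is a fresh value that is not yet buffered on the heap. Then overwrite
  // the field selected by the bytecode and push the transformed copy.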
  ValueTypeNode* new_vt = vt->clone()->as_ValueType();
  new_vt->set_oop(_gvn.zerocon(T_VALUETYPE));
  int offset = field->offset();
  uint i = 0;
  for (; i < new_vt->field_count() && new_vt->field_offset(i) != offset; i++) {}
  assert(i < new_vt->field_count(), "field not found");
  new_vt->set_field_value(i, val);

  push(_gvn.transform(new_vt));
}

#ifndef PRODUCT
//------------------------------dump_map_adr_mem-------------------------------
// Debug dump of the mapping from address types to MergeMemNode indices.
void Parse::dump_map_adr_mem() const {
  tty->print_cr("--- Mapping from address types to memory Nodes ---");
  MergeMemNode *mem = map() == NULL ? NULL : (map()->memory()->is_MergeMem() ?
                                      map()->memory()->as_MergeMem() : NULL);
  for (uint i = 0; i < (uint)C->num_alias_types(); i++) {
    C->alias_type(i)->print_on(tty);
    tty->print("\t");
    // Node mapping, if any
    if (mem && i < mem->req() && mem->in(i) && mem->in(i) != mem->empty_memory()) {
      mem->in(i)->dump();
    } else {
      tty->cr();
    }
  }
}

#endif


//=============================================================================
//
// parser methods for profiling


//----------------------test_counter_against_threshold ------------------------
void Parse::test_counter_against_threshold(Node* cnt, int limit) {
  // Test the counter against the limit and uncommon trap if the limit is reached.

  // This code is largely copied from the range check code in
  // array_addressing()

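  // Emit (sketch):
  //   if ((uint)cnt >= (uint)limit)
  //     uncommon_trap(Reason_age, Action_maybe_recompile)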
  // Test invocation count vs threshold
  Node *threshold = makecon(TypeInt::make(limit));
  Node *chk   = _gvn.transform( new CmpUNode( cnt, threshold) );
  BoolTest::mask btest = BoolTest::lt;
  Node *tst   = _gvn.transform( new BoolNode( chk, btest) );
  // Branch to failure if threshold exceeded
  { BuildCutout unless(this, tst, PROB_ALWAYS);
    uncommon_trap(Deoptimization::Reason_age,
                  Deoptimization::Action_maybe_recompile);
  }
}

//----------------------increment_and_test_invocation_counter-------------------
void Parse::increment_and_test_invocation_counter(int limit) {
  if (!count_invocations()) return;

  // Get the Method* node.
  ciMethod* m = method();
  MethodCounters* counters_adr = m->ensure_method_counters();
  if (counters_adr == NULL) {
    C->record_failure("method counters allocation failed");
    return;
  }

  Node* ctrl = control();
  const TypePtr* adr_type = TypeRawPtr::make((address) counters_adr);
  Node *counters_node = makecon(adr_type);
  Node* adr_iic_node = basic_plus_adr(counters_node, counters_node,
    MethodCounters::interpreter_invocation_counter_offset_in_bytes());
  Node* cnt = make_load(ctrl, adr_iic_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);

  test_counter_against_threshold(cnt, limit);

  // Add one to the counter and store
  Node* incr = _gvn.transform(new AddINode(cnt, _gvn.intcon(1)));
  store_to_memory(ctrl, adr_iic_node, incr, T_INT, adr_type, MemNode::unordered);
}

//----------------------------method_data_addressing---------------------------
Node* Parse::method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  // Get offset within MethodData* of the data array
  ByteSize data_offset = MethodData::data_offset();

  // Get cell offset of the ProfileData within data array
  int cell_offset = md->dp_to_di(data->dp());

  // Add in counter_offset, the # of bytes into the ProfileData of counter or flag
  int offset = in_bytes(data_offset) + cell_offset + in_bytes(counter_offset);

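  // The resulting address is (sketch):
  //   mdo + data_offset + cell_offset + counter_offset (+ idx * stride)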
  const TypePtr* adr_type = TypeMetadataPtr::make(md);
  Node* mdo = makecon(adr_type);
  Node* ptr = basic_plus_adr(mdo, mdo, offset);

  if (stride != 0) {
    Node* str = _gvn.MakeConX(stride);
    Node* scale = _gvn.transform( new MulXNode( idx, str ) );
    ptr   = _gvn.transform( new AddPNode( mdo, ptr, scale ) );
  }

  return ptr;
}

//--------------------------increment_md_counter_at----------------------------
void Parse::increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  Node* adr_node = method_data_addressing(md, data, counter_offset, idx, stride);

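  // Plain load-increment-store; profile counters tolerate lost updates under
  // races, so no atomic update is needed here.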
  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt  = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
  Node* incr = _gvn.transform(new AddINode(cnt, _gvn.intcon(DataLayout::counter_increment)));
  store_to_memory(NULL, adr_node, incr, T_INT, adr_type, MemNode::unordered);
}

//--------------------------test_for_osr_md_counter_at-------------------------
void Parse::test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, int limit) {
  Node* adr_node = method_data_addressing(md, data, counter_offset);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt  = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);

  test_counter_against_threshold(cnt, limit);
}

//-------------------------------set_md_flag_at--------------------------------
void Parse::set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant) {
  Node* adr_node = method_data_addressing(md, data, DataLayout::flags_offset());

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* flags = make_load(NULL, adr_node, TypeInt::BYTE, T_BYTE, adr_type, MemNode::unordered);
  Node* incr = _gvn.transform(new OrINode(flags, _gvn.intcon(flag_constant)));
  store_to_memory(NULL, adr_node, incr, T_BYTE, adr_type, MemNode::unordered);
}

//----------------------------profile_taken_branch-----------------------------
void Parse::profile_taken_branch(int target_bci, bool force_update) {
  // This is a potential osr_site if we have a backedge.
  int cur_bci = bci();
  bool osr_site =
    (target_bci <= cur_bci) && count_invocations() && UseOnStackReplacement;

  // If we are going to OSR, restart at the target bytecode.
  set_bci(target_bci);

  // To do: factor out the limit calculations below. These duplicate
  // the similar limit calculations in the interpreter.

  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(cur_bci);
    assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch");
    increment_md_counter_at(md, data, JumpData::taken_offset());
  }

  // In the new tiered system this is all we need to do. In the old
  // (c2 based) tiered system we must also execute the code below.
#ifndef TIERED
  if (method_data_update()) {
    ciMethodData* md = method()->method_data();
    if (osr_site) {
      ciProfileData* data = md->bci_to_data(cur_bci);
      assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch");
      int limit = (CompileThreshold
                   * (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100;
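      // e.g. with the default flag values (CompileThreshold=10000,
      // OnStackReplacePercentage=140, InterpreterProfilePercentage=33)
      // this yields limit = 10700.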
      test_for_osr_md_counter_at(md, data, JumpData::taken_offset(), limit);
    }
  } else {
    // With method data update off, use the invocation counter to trigger an
    // OSR compilation, as done in the interpreter.
    if (osr_site) {
      int limit = (CompileThreshold * OnStackReplacePercentage) / 100;
      increment_and_test_invocation_counter(limit);
    }
  }
#endif // TIERED

  // Restore the original bytecode.
  set_bci(cur_bci);
}

//--------------------------profile_not_taken_branch---------------------------
void Parse::profile_not_taken_branch(bool force_update) {

  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(bci());
    assert(data != NULL && data->is_BranchData(), "need BranchData for not taken branch");
    increment_md_counter_at(md, data, BranchData::not_taken_offset());
  }

}

//---------------------------------profile_call--------------------------------
void Parse::profile_call(Node* receiver) {
  if (!method_data_update()) return;

  switch (bc()) {
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    profile_receiver_type(receiver);
    break;
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokespecial:
    profile_generic_call();
    break;
  default: fatal("unexpected call bytecode");
  }
}

//------------------------------profile_generic_call---------------------------
void Parse::profile_generic_call() {
  assert(method_data_update(), "must be generating profile code");

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data != NULL && data->is_CounterData(), "need CounterData for generic call");
  increment_md_counter_at(md, data, CounterData::count_offset());
}

//-----------------------------profile_receiver_type---------------------------
void Parse::profile_receiver_type(Node* receiver) {
  assert(method_data_update(), "must be generating profile code");

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data != NULL && data->is_ReceiverTypeData(), "need ReceiverTypeData here");

  // Skip if we aren't tracking receivers
  if (TypeProfileWidth < 1) {
    increment_md_counter_at(md, data, CounterData::count_offset());
    return;
  }
  ciReceiverTypeData* rdata = (ciReceiverTypeData*)data->as_ReceiverTypeData();

  Node* method_data = method_data_addressing(md, rdata, in_ByteSize(0));

  // Using an adr_type of TypePtr::BOTTOM to work around anti-dep problems.
  // A better solution might be to use TypeRawPtr::BOTTOM with RC_NARROW_MEM.
  make_runtime_call(RC_LEAF, OptoRuntime::profile_receiver_type_Type(),
                    CAST_FROM_FN_PTR(address,
                                     OptoRuntime::profile_receiver_type_C),
                    "profile_receiver_type_C",
                    TypePtr::BOTTOM,
                    method_data, receiver);
}

//---------------------------------profile_ret---------------------------------
void Parse::profile_ret(int target_bci) {
  if (!method_data_update()) return;

  // Skip if we aren't tracking ret targets
  if (TypeProfileWidth < 1) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data != NULL && data->is_RetData(), "need RetData for ret");
  ciRetData* ret_data = (ciRetData*)data->as_RetData();

  // Check whether the target_bci is already in the table
  uint row;
  bool table_full = true;
  for (row = 0; row < ret_data->row_limit(); row++) {
    int key = ret_data->bci(row);
    table_full &= (key != RetData::no_bci);
    if (key == target_bci) break;
  }
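  // On loop exit, 'row' indexes the matching entry, or equals row_limit() if
  // target_bci was not found; 'table_full' is true only if every row examined
  // already holds a bci.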

  if (row >= ret_data->row_limit()) {
    // The target_bci was not found in the table.
    if (!table_full) {
      // XXX: Make slow call to update RetData
    }
    return;
  }

  // the target_bci is already in the table
  increment_md_counter_at(md, data, RetData::bci_count_offset(row));
}

//--------------------------profile_null_checkcast----------------------------
void Parse::profile_null_checkcast() {
  // Set the null-seen flag, done in conjunction with the usual null check. We
  // never unset the flag, so this is a one-way switch.
  if (!method_data_update()) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data != NULL && data->is_BitData(), "need BitData for checkcast");
  set_md_flag_at(md, data, BitData::null_seen_byte_constant());
}

//-----------------------------profile_switch_case-----------------------------
void Parse::profile_switch_case(int table_index) {
  if (!method_data_update()) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");

  ciProfileData* data = md->bci_to_data(bci());
  assert(data != NULL && data->is_MultiBranchData(), "need MultiBranchData for switch case");
  if (table_index >= 0) {
    increment_md_counter_at(md, data, MultiBranchData::case_count_offset(table_index));
  } else {
    increment_md_counter_at(md, data, MultiBranchData::default_count_offset());
  }
}