rev 5661 : 8024921: PPC64 (part 113): Extend Load and Store nodes to know about memory ordering.

/*
 * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "compiler/compileLog.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/sharedRuntime.hpp"

//------------------------------make_dtrace_method_entry_exit ----------------
// Dtrace -- record entry or exit of a method if compiled with dtrace support
void GraphKit::make_dtrace_method_entry_exit(ciMethod* method, bool is_entry) {
  const TypeFunc *call_type    = OptoRuntime::dtrace_method_entry_exit_Type();
  address         call_address = is_entry ? CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry) :
                                            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit);
  const char     *call_name    = is_entry ? "dtrace_method_entry" : "dtrace_method_exit";

  // Get base of thread-local storage area
  Node* thread = _gvn.transform( new (C) ThreadLocalNode() );

  // Get method
  const TypePtr* method_type = TypeMetadataPtr::make(method);
  Node *method_node = _gvn.transform( ConNode::make(C, method_type) );

  kill_dead_locals();

  // For some reason, this call reads only raw memory.
  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
  make_runtime_call(RC_LEAF | RC_NARROW_MEM,
                    call_type, call_address,
                    call_name, raw_adr_type,
                    thread, method_node);
}


//=============================================================================
//------------------------------do_checkcast-----------------------------------
void Parse::do_checkcast() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  Node *obj = peek();

  // Throw uncommon trap if class is not loaded or the value we are casting
  // _from_ is not loaded, and value is not null.  If the value _is_ NULL,
  // then the checkcast does nothing.
  const TypeOopPtr *tp = _gvn.type(obj)->isa_oopptr();
  if (!will_link || (tp && tp->klass() && !tp->klass()->is_loaded())) {
    if (C->log() != NULL) {
      if (!will_link) {
        C->log()->elem("assert_null reason='checkcast' klass='%d'",
                       C->log()->identify(klass));
      }
      if (tp && tp->klass() && !tp->klass()->is_loaded()) {
        // %%% Cannot happen?
        C->log()->elem("assert_null reason='checkcast source' klass='%d'",
                       C->log()->identify(tp->klass()));
      }
    }
    null_assert(obj);
    assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
    if (!stopped()) {
      profile_null_checkcast();
    }
    return;
  }

  Node *res = gen_checkcast(obj, makecon(TypeKlassPtr::make(klass)) );

  // Pop from stack AFTER gen_checkcast because it can uncommon trap and
  // the debug info has to be correct.
  pop();
  push(res);
}


//------------------------------do_instanceof----------------------------------
void Parse::do_instanceof() {
  if (stopped())  return;
  // We would like to return false if class is not loaded, emitting a
  // dependency, but Java requires instanceof to load its operand.

  // Throw uncommon trap if class is not loaded
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  if (!will_link) {
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='instanceof' klass='%d'",
                     C->log()->identify(klass));
    }
    null_assert(peek());
    assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
    if (!stopped()) {
      // The object is now known to be null.
      // Shortcut the effect of gen_instanceof and return "false" directly.
      pop();                   // pop the null
      push(_gvn.intcon(0));    // push false answer
    }
    return;
  }

  // Push the bool result back on stack
  Node* res = gen_instanceof(peek(), makecon(TypeKlassPtr::make(klass)), true);

  // Pop from stack AFTER gen_instanceof because it can uncommon trap.
  pop();
  push(res);
}

//------------------------------array_store_check------------------------------
// pull array from stack and check that the store is valid
void Parse::array_store_check() {

  // Shorthand access to array store elements without popping them.
  Node *obj = peek(0);
  Node *idx = peek(1);
  Node *ary = peek(2);

  if (_gvn.type(obj) == TypePtr::NULL_PTR) {
    // There's never a type check on null values.
    // This cutout lets us avoid the uncommon_trap(Reason_array_check)
    // below, which turns into a performance liability if the
    // gen_checkcast folds up completely.
    return;
  }

  // Extract the array klass type
  int klass_offset = oopDesc::klass_offset_in_bytes();
  Node* p = basic_plus_adr( ary, ary, klass_offset );
  // p's type is array-of-OOPS plus klass_offset
  Node* array_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS) );
  // Get the array klass
  const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr();

  // array_klass's type is generally INexact array-of-oop.  Heroically
  // cast the array klass to EXACT array and uncommon-trap if the cast
  // fails.
  bool always_see_exact_class = false;
  if (MonomorphicArrayCheck
      && !too_many_traps(Deoptimization::Reason_array_check)) {
    always_see_exact_class = true;
    // (If no MDO at all, hope for the best, until a trap actually occurs.)
  }

  // Is the array klass exactly its defined type?
  if (always_see_exact_class && !tak->klass_is_exact()) {
    // Make a constant out of the inexact array klass
    const TypeKlassPtr *extak = tak->cast_to_exactness(true)->is_klassptr();
    Node* con = makecon(extak);
    Node* cmp = _gvn.transform(new (C) CmpPNode( array_klass, con ));
    Node* bol = _gvn.transform(new (C) BoolNode( cmp, BoolTest::eq ));
    Node* ctrl= control();
    { BuildCutout unless(this, bol, PROB_MAX);
      uncommon_trap(Deoptimization::Reason_array_check,
                    Deoptimization::Action_maybe_recompile,
                    tak->klass());
    }
    if (stopped()) {          // MUST uncommon-trap?
      set_control(ctrl);      // Then Don't Do It, just fall into the normal checking
    } else {                  // Cast array klass to exactness:
      // Use the exact constant value we know it is.
      replace_in_map(array_klass,con);
      CompileLog* log = C->log();
      if (log != NULL) {
        log->elem("cast_up reason='monomorphic_array' from='%d' to='(exact)'",
                  log->identify(tak->klass()));
      }
      array_klass = con;      // Use cast value moving forward
    }
  }

  // Come here for polymorphic array klasses

  // Extract the array element class
  int element_klass_offset = in_bytes(ObjArrayKlass::element_klass_offset());
  Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset);
  Node *a_e_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p2, tak) );

  // Check (the hard way) and throw if not a subklass.
  // Result is ignored, we just need the CFG effects.
  gen_checkcast( obj, a_e_klass );
}


void Parse::emit_guard_for_new(ciInstanceKlass* klass) {
  // Emit guarded new
  //   if (klass->_init_thread != current_thread ||
  //       klass->_init_state != being_initialized)
  //      uncommon_trap
  Node* cur_thread = _gvn.transform( new (C) ThreadLocalNode() );
  Node* merge = new (C) RegionNode(3);
  _gvn.set_type(merge, Type::CONTROL);
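  // 'merge' joins the two paths on which the guard fails; both lead to the
  // uncommon trap below.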
  Node* kls = makecon(TypeKlassPtr::make(klass));

  Node* init_thread_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_thread_offset()));
  Node* adr_node = basic_plus_adr(kls, kls, init_thread_offset);
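  // The trailing LoadNode::unordered argument is new with 8024921: load and
  // store nodes now carry the memory ordering they require, and the loads
  // and stores in this file all use plain unordered semantics.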
  Node* init_thread = make_load(NULL, adr_node, TypeRawPtr::BOTTOM, T_ADDRESS, false, LoadNode::unordered);
  Node *tst   = Bool( CmpP( init_thread, cur_thread), BoolTest::eq);
  IfNode* iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
  set_control(IfTrue(iff));
  merge->set_req(1, IfFalse(iff));

  Node* init_state_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_state_offset()));
  adr_node = basic_plus_adr(kls, kls, init_state_offset);
  // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
  // can generate code to load it as unsigned byte.
  Node* init_state = make_load(NULL, adr_node, TypeInt::UBYTE, T_BOOLEAN, false, LoadNode::unordered);
  Node* being_init = _gvn.intcon(InstanceKlass::being_initialized);
  tst   = Bool( CmpI( init_state, being_init), BoolTest::eq);
  iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
  set_control(IfTrue(iff));
  merge->set_req(2, IfFalse(iff));

  PreserveJVMState pjvms(this);
  record_for_igvn(merge);
  set_control(merge);

  uncommon_trap(Deoptimization::Reason_uninitialized,
                Deoptimization::Action_reinterpret,
                klass);
}


//------------------------------do_new-----------------------------------------
void Parse::do_new() {
  kill_dead_locals();

  bool will_link;
  ciInstanceKlass* klass = iter().get_klass(will_link)->as_instance_klass();
  assert(will_link, "_new: typeflow responsibility");

  // Should initialize, or throw an InstantiationError?
  if ((!klass->is_initialized() && !klass->is_being_initialized()) ||
      klass->is_abstract() || klass->is_interface() ||
      klass->name() == ciSymbol::java_lang_Class() ||
      iter().is_unresolved_klass()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  klass);
    return;
  }
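  // The class is still being initialized; emit a guard so that only the
  // initializing thread reaches the allocation, and trap otherwise.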
  if (klass->is_being_initialized()) {
    emit_guard_for_new(klass);
  }

  Node* kls = makecon(TypeKlassPtr::make(klass));
  Node* obj = new_instance(kls);

  // Push resultant oop onto stack
  push(obj);

  // Keep track of whether opportunities exist for StringBuilder
  // optimizations.
  if (OptimizeStringConcat &&
      (klass == C->env()->StringBuilder_klass() ||
       klass == C->env()->StringBuffer_klass())) {
    C->set_has_stringbuilder(true);
  }

  // Keep track of boxed values for EliminateAutoBox optimizations.
  if (C->eliminate_boxing() && klass->is_box_klass()) {
    C->set_has_boxed_value(true);
  }
}

#ifndef PRODUCT
//------------------------------dump_map_adr_mem-------------------------------
// Debug dump of the mapping from address types to MergeMemNode indices.
void Parse::dump_map_adr_mem() const {
  tty->print_cr("--- Mapping from address types to memory Nodes ---");
  MergeMemNode *mem = map() == NULL ? NULL : (map()->memory()->is_MergeMem() ?
                                      map()->memory()->as_MergeMem() : NULL);
  for (uint i = 0; i < (uint)C->num_alias_types(); i++) {
    C->alias_type(i)->print_on(tty);
    tty->print("\t");
    // Node mapping, if any
    if (mem && i < mem->req() && mem->in(i) && mem->in(i) != mem->empty_memory()) {
      mem->in(i)->dump();
    } else {
      tty->cr();
    }
  }
}

#endif


//=============================================================================
//
// parser methods for profiling


//----------------------test_counter_against_threshold ------------------------
void Parse::test_counter_against_threshold(Node* cnt, int limit) {
  // Test the counter against the limit and uncommon trap if greater.

  // This code is largely copied from the range check code in
  // array_addressing()

  // Test invocation count vs threshold
  Node *threshold = makecon(TypeInt::make(limit));
  Node *chk   = _gvn.transform( new (C) CmpUNode( cnt, threshold) );
  BoolTest::mask btest = BoolTest::lt;
  Node *tst   = _gvn.transform( new (C) BoolNode( chk, btest) );
  // Branch to failure if threshold exceeded
  { BuildCutout unless(this, tst, PROB_ALWAYS);
    uncommon_trap(Deoptimization::Reason_age,
                  Deoptimization::Action_maybe_recompile);
  }
}

//----------------------increment_and_test_invocation_counter-------------------
void Parse::increment_and_test_invocation_counter(int limit) {
  if (!count_invocations()) return;

  // Get the Method* node.
  ciMethod* m = method();
  MethodCounters* counters_adr = m->ensure_method_counters();
  if (counters_adr == NULL) {
    C->record_failure("method counters allocation failed");
    return;
  }

  Node* ctrl = control();
  const TypePtr* adr_type = TypeRawPtr::make((address) counters_adr);
  Node *counters_node = makecon(adr_type);
  Node* adr_iic_node = basic_plus_adr(counters_node, counters_node,
    MethodCounters::interpreter_invocation_counter_offset_in_bytes());
  Node* cnt = make_load(ctrl, adr_iic_node, TypeInt::INT, T_INT, adr_type, false, LoadNode::unordered);

  test_counter_against_threshold(cnt, limit);

  // Add one to the counter and store
  Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(1)));
  store_to_memory(ctrl, adr_iic_node, incr, T_INT, adr_type, false, StoreNode::unordered);
}

//----------------------------method_data_addressing---------------------------
Node* Parse::method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  // Get offset within MethodData* of the data array
  ByteSize data_offset = MethodData::data_offset();

  // Get cell offset of the ProfileData within data array
  int cell_offset = md->dp_to_di(data->dp());

  // Add in counter_offset, the # of bytes into the ProfileData of counter or flag
  int offset = in_bytes(data_offset) + cell_offset + in_bytes(counter_offset);

  const TypePtr* adr_type = TypeMetadataPtr::make(md);
  Node* mdo = makecon(adr_type);
  Node* ptr = basic_plus_adr(mdo, mdo, offset);

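  // If an index was supplied, scale it by the stride and add it to the
  // base pointer.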
  if (stride != 0) {
    Node* str = _gvn.MakeConX(stride);
    Node* scale = _gvn.transform( new (C) MulXNode( idx, str ) );
    ptr   = _gvn.transform( new (C) AddPNode( mdo, ptr, scale ) );
  }

  return ptr;
}

//--------------------------increment_md_counter_at----------------------------
void Parse::increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  Node* adr_node = method_data_addressing(md, data, counter_offset, idx, stride);

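  // Plain load-increment-store of the counter cell, using unordered memory
  // accesses.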
  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt  = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, false, LoadNode::unordered);
  Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(DataLayout::counter_increment)));
  store_to_memory(NULL, adr_node, incr, T_INT, adr_type, false, StoreNode::unordered);
}

//--------------------------test_for_osr_md_counter_at-------------------------
void Parse::test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, int limit) {
  Node* adr_node = method_data_addressing(md, data, counter_offset);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt  = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, false, LoadNode::unordered);

  test_counter_against_threshold(cnt, limit);
}

//-------------------------------set_md_flag_at--------------------------------
void Parse::set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant) {
  Node* adr_node = method_data_addressing(md, data, DataLayout::flags_offset());

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
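  // Load the flags byte, OR in the new flag bit, and store it back.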
  Node* flags = make_load(NULL, adr_node, TypeInt::BYTE, T_BYTE, adr_type, false, LoadNode::unordered);
  Node* incr = _gvn.transform(new (C) OrINode(flags, _gvn.intcon(flag_constant)));
  store_to_memory(NULL, adr_node, incr, T_BYTE, adr_type, false, StoreNode::unordered);
}

//----------------------------profile_taken_branch-----------------------------
void Parse::profile_taken_branch(int target_bci, bool force_update) {
  // This is a potential osr_site if we have a backedge.
  int cur_bci = bci();
  bool osr_site =
    (target_bci <= cur_bci) && count_invocations() && UseOnStackReplacement;

  // If we are going to OSR, restart at the target bytecode.
  set_bci(target_bci);

  // To do: factor out the limit calculations below. These duplicate
  // the similar limit calculations in the interpreter.

  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(cur_bci);
    assert(data->is_JumpData(), "need JumpData for taken branch");
    increment_md_counter_at(md, data, JumpData::taken_offset());
  }

  // In the new tiered system this is all we need to do. In the old
  // (c2 based) tiered system we must do the code below.
#ifndef TIERED
  if (method_data_update()) {
    ciMethodData* md = method()->method_data();
    if (osr_site) {
      ciProfileData* data = md->bci_to_data(cur_bci);
      int limit = (CompileThreshold
                   * (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100;
      test_for_osr_md_counter_at(md, data, JumpData::taken_offset(), limit);
    }
  } else {
    // With method data update off, use the invocation counter to trigger an
    // OSR compilation, as done in the interpreter.
    if (osr_site) {
      int limit = (CompileThreshold * OnStackReplacePercentage) / 100;
      increment_and_test_invocation_counter(limit);
    }
  }
#endif // TIERED

  // Restore the original bytecode.
  set_bci(cur_bci);
}

//--------------------------profile_not_taken_branch---------------------------
void Parse::profile_not_taken_branch(bool force_update) {

  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(bci());
    assert(data->is_BranchData(), "need BranchData for not taken branch");
    increment_md_counter_at(md, data, BranchData::not_taken_offset());
  }

}

//---------------------------------profile_call--------------------------------
void Parse::profile_call(Node* receiver) {
  if (!method_data_update()) return;

  switch (bc()) {
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    profile_receiver_type(receiver);
    break;
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokespecial:
    profile_generic_call();
    break;
  default: fatal("unexpected call bytecode");
  }
}

//------------------------------profile_generic_call---------------------------
void Parse::profile_generic_call() {
  assert(method_data_update(), "must be generating profile code");

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_CounterData(), "need CounterData for generic call");
  increment_md_counter_at(md, data, CounterData::count_offset());
}

//-----------------------------profile_receiver_type---------------------------
void Parse::profile_receiver_type(Node* receiver) {
  assert(method_data_update(), "must be generating profile code");

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData here");

  // Skip if we aren't tracking receivers
  if (TypeProfileWidth < 1) {
    increment_md_counter_at(md, data, CounterData::count_offset());
    return;
  }
  ciReceiverTypeData* rdata = (ciReceiverTypeData*)data->as_ReceiverTypeData();

  Node* method_data = method_data_addressing(md, rdata, in_ByteSize(0));

  // Using an adr_type of TypePtr::BOTTOM to work around anti-dep problems.
  // A better solution might be to use TypeRawPtr::BOTTOM with RC_NARROW_MEM.
  make_runtime_call(RC_LEAF, OptoRuntime::profile_receiver_type_Type(),
                    CAST_FROM_FN_PTR(address,
                                     OptoRuntime::profile_receiver_type_C),
                    "profile_receiver_type_C",
                    TypePtr::BOTTOM,
                    method_data, receiver);
}

//---------------------------------profile_ret---------------------------------
void Parse::profile_ret(int target_bci) {
  if (!method_data_update()) return;

  // Skip if we aren't tracking ret targets
  if (TypeProfileWidth < 1) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_RetData(), "need RetData for ret");
  ciRetData* ret_data = (ciRetData*)data->as_RetData();

  // Check whether the target_bci is already in the table.
  uint row;
  bool table_full = true;
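  // table_full stays true only if every row already holds a real bci
  // (no RetData::no_bci entries remain).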
  for (row = 0; row < ret_data->row_limit(); row++) {
    int key = ret_data->bci(row);
    table_full &= (key != RetData::no_bci);
    if (key == target_bci) break;
  }

  if (row >= ret_data->row_limit()) {
    // The target_bci was not found in the table.
    if (!table_full) {
      // XXX: Make slow call to update RetData
    }
    return;
  }

  // The target_bci is already in the table.
  increment_md_counter_at(md, data, RetData::bci_count_offset(row));
}

//--------------------------profile_null_checkcast----------------------------
void Parse::profile_null_checkcast() {
  // Set the null-seen flag, done in conjunction with the usual null check. We
  // never unset the flag, so this is a one-way switch.
  if (!method_data_update()) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_BitData(), "need BitData for checkcast");
  set_md_flag_at(md, data, BitData::null_seen_byte_constant());
}

//-----------------------------profile_switch_case-----------------------------
void Parse::profile_switch_case(int table_index) {
  if (!method_data_update()) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");

  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_MultiBranchData(), "need MultiBranchData for switch case");
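  // A non-negative table_index selects the counter for that case; a negative
  // index means the default target.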
  if (table_index >= 0) {
    increment_md_counter_at(md, data, MultiBranchData::case_count_offset(table_index));
  } else {
    increment_md_counter_at(md, data, MultiBranchData::default_count_offset());
  }
}