/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "compiler/compileLog.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/sharedRuntime.hpp"

//------------------------------make_dtrace_method_entry_exit ----------------
// Dtrace -- record entry or exit of a method if compiled with dtrace support
void GraphKit::make_dtrace_method_entry_exit(ciMethod* method, bool is_entry) {
  const TypeFunc *call_type = OptoRuntime::dtrace_method_entry_exit_Type();
  address call_address = is_entry ? CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry) :
                                    CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit);
  const char *call_name = is_entry ? "dtrace_method_entry" : "dtrace_method_exit";

  // Get base of thread-local storage area
  Node* thread = _gvn.transform( new ThreadLocalNode() );

  // Get method
  const TypePtr* method_type = TypeMetadataPtr::make(method);
  Node *method_node = _gvn.transform(ConNode::make(method_type));

  kill_dead_locals();

  // For some reason, this call reads only raw memory.
  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
  make_runtime_call(RC_LEAF | RC_NARROW_MEM,
                    call_type, call_address,
                    call_name, raw_adr_type,
                    thread, method_node);
}


//=============================================================================
//------------------------------do_checkcast-----------------------------------
void Parse::do_checkcast() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  Node *obj = peek();

  // Throw uncommon trap if class is not loaded or the value we are casting
  // _from_ is not loaded, and value is not null.  If the value _is_ NULL,
  // then the checkcast does nothing.
  const TypeOopPtr *tp = _gvn.type(obj)->isa_oopptr();
  if (!will_link || (tp && tp->klass() && !tp->klass()->is_loaded())) {
    if (C->log() != NULL) {
      if (!will_link) {
        C->log()->elem("assert_null reason='checkcast' klass='%d'",
                       C->log()->identify(klass));
      }
      if (tp && tp->klass() && !tp->klass()->is_loaded()) {
        // %%% Cannot happen?
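        // (If the source type is unloaded, no instance of it can have been
        //  created, so only null should be able to reach this cast; the
        //  null_assert below relies on exactly that assumption.)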
        C->log()->elem("assert_null reason='checkcast source' klass='%d'",
                       C->log()->identify(tp->klass()));
      }
    }
    null_assert(obj);
    assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
    if (!stopped()) {
      profile_null_checkcast();
    }
    return;
  }

  Node *res = gen_checkcast(obj, makecon(TypeKlassPtr::make(klass)) );

  // Pop from stack AFTER gen_checkcast because it can uncommon trap and
  // the debug info has to be correct.
  pop();
  push(res);
}


//------------------------------do_instanceof----------------------------------
void Parse::do_instanceof() {
  if (stopped())  return;
  // We would like to return false if class is not loaded, emitting a
  // dependency, but Java requires instanceof to load its operand.

  // Throw uncommon trap if class is not loaded
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  if (!will_link) {
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='instanceof' klass='%d'",
                     C->log()->identify(klass));
    }
    null_assert(peek());
    assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
    if (!stopped()) {
      // The object is now known to be null.
      // Shortcut the effect of gen_instanceof and return "false" directly.
      pop();                   // pop the null
      push(_gvn.intcon(0));    // push false answer
    }
    return;
  }

  // Push the bool result back on stack
  Node* res = gen_instanceof(peek(), makecon(TypeKlassPtr::make(klass)), true);

  // Pop from stack AFTER gen_instanceof because it can uncommon trap.
  pop();
  push(res);
}

//------------------------------array_store_check------------------------------
// pull array from stack and check that the store is valid
void Parse::array_store_check() {

  // Shorthand access to array store elements without popping them.
  Node *obj = peek(0);
  Node *idx = peek(1);
  Node *ary = peek(2);

  if (ShenandoahVerifyReadsToFromSpace) {
    obj = shenandoah_read_barrier(obj);
    ary = shenandoah_read_barrier(ary);
  }

  if (_gvn.type(obj) == TypePtr::NULL_PTR) {
    // There's never a type check on null values.
    // This cutout lets us avoid the uncommon_trap(Reason_array_check)
    // below, which turns into a performance liability if the
    // gen_checkcast folds up completely.
    return;
  }

  // Extract the array klass type
  int klass_offset = oopDesc::klass_offset_in_bytes();
  Node* p = basic_plus_adr( ary, ary, klass_offset );
  // p's type is array-of-OOPS plus klass_offset
  Node* array_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS));
  // Get the array klass
  const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr();

  // The type of array_klass is usually INexact array-of-oop.  Heroically
  // cast array_klass to EXACT array and uncommon-trap if the cast fails.
  // Make constant out of the inexact array klass, but use it only if the
  // cast succeeds.
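  // The emitted shape is roughly:
  //   if (array_klass != ExactKlassCon)  uncommon_trap(Reason_array_check);
  //   ... fall through with array_klass replaced by the exact constant ...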
  bool always_see_exact_class = false;
  if (MonomorphicArrayCheck
      && !too_many_traps(Deoptimization::Reason_array_check)
      && !tak->klass_is_exact()
      && tak != TypeKlassPtr::OBJECT) {
    // Regarding the fourth condition in the if-statement above:
    //
    // If the compiler has determined that the type of array 'ary' (represented
    // by 'array_klass') is java/lang/Object, the compiler must not assume that
    // the array 'ary' is monomorphic.
    //
    // If 'ary' were of type java/lang/Object, this arraystore would have to fail,
    // because it is not possible to perform an arraystore into an object that is
    // not a "proper" array.
    //
    // Therefore, let's obtain at runtime the type of 'ary' and check if we can
    // still successfully perform the store.
    //
    // The implementation reasons for the condition are the following:
    //
    // java/lang/Object is the superclass of all arrays, but it is represented by
    // the VM as an InstanceKlass. The checks generated by gen_checkcast() (see
    // below) expect 'array_klass' to be ObjArrayKlass, which can result in
    // invalid memory accesses.
    //
    // See issue JDK-8057622 for details.

    always_see_exact_class = true;
    // (If no MDO at all, hope for the best, until a trap actually occurs.)

    // Make a constant out of the inexact array klass
    const TypeKlassPtr *extak = tak->cast_to_exactness(true)->is_klassptr();
    Node* con = makecon(extak);
    Node* cmp = _gvn.transform(new CmpPNode( array_klass, con ));
    Node* bol = _gvn.transform(new BoolNode( cmp, BoolTest::eq ));
    Node* ctrl = control();
    { BuildCutout unless(this, bol, PROB_MAX);
      uncommon_trap(Deoptimization::Reason_array_check,
                    Deoptimization::Action_maybe_recompile,
                    tak->klass());
    }
    if (stopped()) {          // MUST uncommon-trap?
      set_control(ctrl);      // Then Don't Do It, just fall into the normal checking
    } else {                  // Cast array klass to exactness:
      // Use the exact constant value we know it is.
      replace_in_map(array_klass, con);
      CompileLog* log = C->log();
      if (log != NULL) {
        log->elem("cast_up reason='monomorphic_array' from='%d' to='(exact)'",
                  log->identify(tak->klass()));
      }
      array_klass = con;      // Use cast value moving forward
    }
  }

  // Come here for polymorphic array klasses

  // Extract the array element class
  int element_klass_offset = in_bytes(ObjArrayKlass::element_klass_offset());
  Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset);
  // We are allowed to use the constant type only if cast succeeded. If always_see_exact_class is true,
  // we must set a control edge from the IfTrue node created by the uncommon_trap above to the
  // LoadKlassNode.
  Node* a_e_klass = _gvn.transform(LoadKlassNode::make(_gvn, always_see_exact_class ? control() : NULL,
                                                       immutable_memory(), p2, tak));

  // Check (the hard way) and throw if not a subklass.
  // Result is ignored, we just need the CFG effects.
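  // gen_checkcast emits the full subtype check, including the slow path and
  // the trap/throw for a failing store; the returned (casted) value is not
  // needed here, only the control-flow side effects.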
  gen_checkcast(obj, a_e_klass);
}


void Parse::emit_guard_for_new(ciInstanceKlass* klass) {
  // Emit guarded new
  //   if (klass->_init_thread != current_thread ||
  //       klass->_init_state != being_initialized)
  //      uncommon_trap
  Node* cur_thread = _gvn.transform( new ThreadLocalNode() );
  Node* merge = new RegionNode(3);
  _gvn.set_type(merge, Type::CONTROL);
  Node* kls = makecon(TypeKlassPtr::make(klass));

  Node* init_thread_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_thread_offset()));
  Node* adr_node = basic_plus_adr(kls, kls, init_thread_offset);
  Node* init_thread = make_load(NULL, adr_node, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);
  Node *tst = Bool( CmpP( init_thread, cur_thread), BoolTest::eq);
  IfNode* iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
  set_control(IfTrue(iff));
  merge->set_req(1, IfFalse(iff));

  Node* init_state_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_state_offset()));
  adr_node = basic_plus_adr(kls, kls, init_state_offset);
  // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
  // can generate code to load it as unsigned byte.
  Node* init_state = make_load(NULL, adr_node, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
  Node* being_init = _gvn.intcon(InstanceKlass::being_initialized);
  tst = Bool( CmpI( init_state, being_init), BoolTest::eq);
  iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
  set_control(IfTrue(iff));
  merge->set_req(2, IfFalse(iff));

  PreserveJVMState pjvms(this);
  record_for_igvn(merge);
  set_control(merge);

  uncommon_trap(Deoptimization::Reason_uninitialized,
                Deoptimization::Action_reinterpret,
                klass);
}


//------------------------------do_new-----------------------------------------
void Parse::do_new() {
  kill_dead_locals();

  bool will_link;
  ciInstanceKlass* klass = iter().get_klass(will_link)->as_instance_klass();
  assert(will_link, "_new: typeflow responsibility");

  // Should initialize, or throw an InstantiationError?
  if ((!klass->is_initialized() && !klass->is_being_initialized()) ||
      klass->is_abstract() || klass->is_interface() ||
      klass->name() == ciSymbol::java_lang_Class() ||
      iter().is_unresolved_klass()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  klass);
    return;
  }
  if (klass->is_being_initialized()) {
    emit_guard_for_new(klass);
  }

  Node* kls = makecon(TypeKlassPtr::make(klass));
  Node* obj = new_instance(kls);

  // Push resultant oop onto stack
  push(obj);

  // Keep track of whether opportunities exist for StringBuilder
  // optimizations.
  if (OptimizeStringConcat &&
      (klass == C->env()->StringBuilder_klass() ||
       klass == C->env()->StringBuffer_klass())) {
    C->set_has_stringbuilder(true);
  }

  // Keep track of boxed values for EliminateAutoBox optimizations.
  if (C->eliminate_boxing() && klass->is_box_klass()) {
    C->set_has_boxed_value(true);
  }
}

#ifndef PRODUCT
//------------------------------dump_map_adr_mem-------------------------------
// Debug dump of the mapping from address types to MergeMemNode indices.
void Parse::dump_map_adr_mem() const {
  tty->print_cr("--- Mapping from address types to memory Nodes ---");
  MergeMemNode *mem = map() == NULL ? NULL : (map()->memory()->is_MergeMem() ?
                                              map()->memory()->as_MergeMem() : NULL);
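  // Walk every alias category the compiler knows about; slices that have no
  // explicit memory state in the current map print as an empty line.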
  for (uint i = 0; i < (uint)C->num_alias_types(); i++) {
    C->alias_type(i)->print_on(tty);
    tty->print("\t");
    // Node mapping, if any
    if (mem && i < mem->req() && mem->in(i) && mem->in(i) != mem->empty_memory()) {
      mem->in(i)->dump();
    } else {
      tty->cr();
    }
  }
}

#endif


//=============================================================================
//
// parser methods for profiling


//----------------------test_counter_against_threshold ------------------------
void Parse::test_counter_against_threshold(Node* cnt, int limit) {
  // Test the counter against the limit and uncommon trap if greater.

  // This code is largely copied from the range check code in
  // array_addressing()

  // Test invocation count vs threshold
  Node *threshold = makecon(TypeInt::make(limit));
  Node *chk = _gvn.transform( new CmpUNode( cnt, threshold) );
  BoolTest::mask btest = BoolTest::lt;
  Node *tst = _gvn.transform( new BoolNode( chk, btest) );
  // Branch to failure if threshold exceeded
  { BuildCutout unless(this, tst, PROB_ALWAYS);
    uncommon_trap(Deoptimization::Reason_age,
                  Deoptimization::Action_maybe_recompile);
  }
}

//----------------------increment_and_test_invocation_counter-------------------
void Parse::increment_and_test_invocation_counter(int limit) {
  if (!count_invocations()) return;

  // Get the Method* node.
  ciMethod* m = method();
  MethodCounters* counters_adr = m->ensure_method_counters();
  if (counters_adr == NULL) {
    C->record_failure("method counters allocation failed");
    return;
  }

  Node* ctrl = control();
  const TypePtr* adr_type = TypeRawPtr::make((address) counters_adr);
  Node *counters_node = makecon(adr_type);
  Node* adr_iic_node = basic_plus_adr(counters_node, counters_node,
                                      MethodCounters::interpreter_invocation_counter_offset_in_bytes());
  Node* cnt = make_load(ctrl, adr_iic_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);

  test_counter_against_threshold(cnt, limit);

  // Add one to the counter and store
  Node* incr = _gvn.transform(new AddINode(cnt, _gvn.intcon(1)));
  store_to_memory(ctrl, adr_iic_node, incr, T_INT, adr_type, MemNode::unordered);
}

//----------------------------method_data_addressing---------------------------
Node* Parse::method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  // Get offset within MethodData* of the data array
  ByteSize data_offset = MethodData::data_offset();

  // Get cell offset of the ProfileData within data array
  int cell_offset = md->dp_to_di(data->dp());

  // Add in counter_offset, the # of bytes into the ProfileData of counter or flag
  int offset = in_bytes(data_offset) + cell_offset + in_bytes(counter_offset);

  const TypePtr* adr_type = TypeMetadataPtr::make(md);
  Node* mdo = makecon(adr_type);
  Node* ptr = basic_plus_adr(mdo, mdo, offset);

  if (stride != 0) {
    Node* str = _gvn.MakeConX(stride);
    Node* scale = _gvn.transform( new MulXNode( idx, str ) );
    ptr = _gvn.transform( new AddPNode( mdo, ptr, scale ) );
  }

  return ptr;
}

//--------------------------increment_md_counter_at----------------------------
void Parse::increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
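  // Plain unordered load/add/store; profile counters tolerate lost updates
  // from racing threads, so no atomic increment is needed here.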
  Node* adr_node = method_data_addressing(md, data, counter_offset, idx, stride);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
  Node* incr = _gvn.transform(new AddINode(cnt, _gvn.intcon(DataLayout::counter_increment)));
  store_to_memory(NULL, adr_node, incr, T_INT, adr_type, MemNode::unordered);
}

//--------------------------test_for_osr_md_counter_at-------------------------
void Parse::test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, int limit) {
  Node* adr_node = method_data_addressing(md, data, counter_offset);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);

  test_counter_against_threshold(cnt, limit);
}

//-------------------------------set_md_flag_at--------------------------------
void Parse::set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant) {
  Node* adr_node = method_data_addressing(md, data, DataLayout::flags_offset());

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* flags = make_load(NULL, adr_node, TypeInt::BYTE, T_BYTE, adr_type, MemNode::unordered);
  Node* incr = _gvn.transform(new OrINode(flags, _gvn.intcon(flag_constant)));
  store_to_memory(NULL, adr_node, incr, T_BYTE, adr_type, MemNode::unordered);
}

//----------------------------profile_taken_branch-----------------------------
void Parse::profile_taken_branch(int target_bci, bool force_update) {
  // This is a potential osr_site if we have a backedge.
  int cur_bci = bci();
  bool osr_site =
    (target_bci <= cur_bci) && count_invocations() && UseOnStackReplacement;

  // If we are going to OSR, restart at the target bytecode.
  set_bci(target_bci);

  // TODO: factor out the limit calculations below. These duplicate
  // the similar limit calculations in the interpreter.

  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(cur_bci);
    assert(data->is_JumpData(), "need JumpData for taken branch");
    increment_md_counter_at(md, data, JumpData::taken_offset());
  }

  // In the new tiered system this is all we need to do. In the old
  // (c2 based) tiered system we must do the code below.
#ifndef TIERED
  if (method_data_update()) {
    ciMethodData* md = method()->method_data();
    if (osr_site) {
      ciProfileData* data = md->bci_to_data(cur_bci);
      int limit = (CompileThreshold
                   * (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100;
      test_for_osr_md_counter_at(md, data, JumpData::taken_offset(), limit);
    }
  } else {
    // With method data update off, use the invocation counter to trigger an
    // OSR compilation, as done in the interpreter.
    if (osr_site) {
      int limit = (CompileThreshold * OnStackReplacePercentage) / 100;
      increment_and_test_invocation_counter(limit);
    }
  }
#endif // TIERED

  // Restore the original bytecode.
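  // (bci was switched to target_bci above so that a trap taken at the OSR
  //  threshold check resumes interpretation at the branch target, i.e. the
  //  loop head, where the interpreter can then trigger the OSR compile.)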
  set_bci(cur_bci);
}

//--------------------------profile_not_taken_branch---------------------------
void Parse::profile_not_taken_branch(bool force_update) {

  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(bci());
    assert(data->is_BranchData(), "need BranchData for not taken branch");
    increment_md_counter_at(md, data, BranchData::not_taken_offset());
  }

}

//---------------------------------profile_call--------------------------------
void Parse::profile_call(Node* receiver) {
  if (!method_data_update()) return;

  switch (bc()) {
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    profile_receiver_type(receiver);
    break;
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokespecial:
    profile_generic_call();
    break;
  default: fatal("unexpected call bytecode");
  }
}

//------------------------------profile_generic_call---------------------------
void Parse::profile_generic_call() {
  assert(method_data_update(), "must be generating profile code");

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_CounterData(), "need CounterData for generic call");
  increment_md_counter_at(md, data, CounterData::count_offset());
}

//-----------------------------profile_receiver_type---------------------------
void Parse::profile_receiver_type(Node* receiver) {
  assert(method_data_update(), "must be generating profile code");

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData here");

  // Skip if we aren't tracking receivers
  if (TypeProfileWidth < 1) {
    increment_md_counter_at(md, data, CounterData::count_offset());
    return;
  }
  ciReceiverTypeData* rdata = (ciReceiverTypeData*)data->as_ReceiverTypeData();

  Node* method_data = method_data_addressing(md, rdata, in_ByteSize(0));

  // Using an adr_type of TypePtr::BOTTOM to work around anti-dep problems.
  // A better solution might be to use TypeRawPtr::BOTTOM with RC_NARROW_MEM.
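  // The leaf call below hands the raw ProfileData address and the receiver
  // oop to the runtime, which records the receiver klass in a row of the
  // ReceiverTypeData (or bumps the generic count once the rows are full).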
  make_runtime_call(RC_LEAF, OptoRuntime::profile_receiver_type_Type(),
                    CAST_FROM_FN_PTR(address,
                                     OptoRuntime::profile_receiver_type_C),
                    "profile_receiver_type_C",
                    TypePtr::BOTTOM,
                    method_data, receiver);
}

//---------------------------------profile_ret---------------------------------
void Parse::profile_ret(int target_bci) {
  if (!method_data_update()) return;

  // Skip if we aren't tracking ret targets
  if (TypeProfileWidth < 1) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_RetData(), "need RetData for ret");
  ciRetData* ret_data = (ciRetData*)data->as_RetData();

  // Check whether target_bci is already in the table
  uint row;
  bool table_full = true;
  for (row = 0; row < ret_data->row_limit(); row++) {
    int key = ret_data->bci(row);
    table_full &= (key != RetData::no_bci);
    if (key == target_bci) break;
  }

  if (row >= ret_data->row_limit()) {
    // The target_bci was not found in the table.
    if (!table_full) {
      // XXX: Make slow call to update RetData
    }
    return;
  }

  // target_bci is already in the table
  increment_md_counter_at(md, data, RetData::bci_count_offset(row));
}

//--------------------------profile_null_checkcast----------------------------
void Parse::profile_null_checkcast() {
  // Set the null-seen flag, done in conjunction with the usual null check. We
  // never unset the flag, so this is a one-way switch.
  if (!method_data_update()) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_BitData(), "need BitData for checkcast");
  set_md_flag_at(md, data, BitData::null_seen_byte_constant());
}

//-----------------------------profile_switch_case-----------------------------
void Parse::profile_switch_case(int table_index) {
  if (!method_data_update()) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");

  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_MultiBranchData(), "need MultiBranchData for switch case");
  if (table_index >= 0) {
    increment_md_counter_at(md, data, MultiBranchData::case_count_offset(table_index));
  } else {
    increment_md_counter_at(md, data, MultiBranchData::default_count_offset());
  }
}