/*
 * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciValueKlass.hpp"
#include "classfile/systemDictionary.hpp"
#include "compiler/compileLog.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/valueArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/valuetypenode.hpp"
#include "runtime/sharedRuntime.hpp"

//------------------------------make_dtrace_method_entry_exit ----------------
// Dtrace -- record entry or exit of a method if compiled with dtrace support
void GraphKit::make_dtrace_method_entry_exit(ciMethod* method, bool is_entry) {
  const TypeFunc *call_type    = OptoRuntime::dtrace_method_entry_exit_Type();
  address         call_address = is_entry ? CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry) :
                                            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit);
  const char     *call_name    = is_entry ? "dtrace_method_entry" : "dtrace_method_exit";

  // Get base of thread-local storage area
  Node* thread = _gvn.transform( new ThreadLocalNode() );

  // Get method
  const TypePtr* method_type = TypeMetadataPtr::make(method);
  Node *method_node = _gvn.transform(ConNode::make(method_type));

  kill_dead_locals();

  // For some reason, this call reads only raw memory.
  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
  make_runtime_call(RC_LEAF | RC_NARROW_MEM,
                    call_type, call_address,
                    call_name, raw_adr_type,
                    thread, method_node);
}


//=============================================================================
//------------------------------do_checkcast-----------------------------------
void Parse::do_checkcast() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  Node *obj = peek();

  // Throw uncommon trap if class is not loaded or the value we are casting
  // _from_ is not loaded, and value is not null. If the value _is_ NULL,
  // then the checkcast does nothing.
  const TypeOopPtr *tp = _gvn.type(obj)->isa_oopptr();
  if (!will_link || (tp && tp->klass() && !tp->klass()->is_loaded())) {
    if (C->log() != NULL) {
      if (!will_link) {
        C->log()->elem("assert_null reason='checkcast' klass='%d'",
                       C->log()->identify(klass));
      }
      if (tp && tp->klass() && !tp->klass()->is_loaded()) {
        // %%% Cannot happen?
        C->log()->elem("assert_null reason='checkcast source' klass='%d'",
                       C->log()->identify(tp->klass()));
      }
    }
    null_assert(obj);
    assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
    if (!stopped()) {
      profile_null_checkcast();
    }
    return;
  }

  Node *res = gen_checkcast(obj, makecon(TypeKlassPtr::make(klass)) );

  // Pop from stack AFTER gen_checkcast because it can uncommon trap and
  // the debug info has to be correct.
  pop();
  push(res);
}


//------------------------------do_instanceof----------------------------------
void Parse::do_instanceof() {
  if (stopped())  return;
  // We would like to return false if class is not loaded, emitting a
  // dependency, but Java requires instanceof to load its operand.

  // Throw uncommon trap if class is not loaded
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  if (!will_link) {
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='instanceof' klass='%d'",
                     C->log()->identify(klass));
    }
    null_assert(peek());
    assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
    if (!stopped()) {
      // The object is now known to be null.
      // Shortcut the effect of gen_instanceof and return "false" directly.
      pop();                   // pop the null
      push(_gvn.intcon(0));    // push false answer
    }
    return;
  }

  // Push the bool result back on stack
  Node* res = gen_instanceof(peek(), makecon(TypeKlassPtr::make(klass)), true);

  // Pop from stack AFTER gen_instanceof because it can uncommon trap.
  pop();
  push(res);
}

//------------------------------array_store_check------------------------------
// pull array from stack and check that the store is valid
void Parse::array_store_check(bool target_is_valuetypearray) {

  // Shorthand access to array store elements without popping them.
  Node *obj = peek(0);
  Node *idx = peek(1);
  Node *ary = peek(2);

  if (_gvn.type(obj) == TypePtr::NULL_PTR) {
    // There's never a type check on null values.
    // This cutout lets us avoid the uncommon_trap(Reason_array_check)
    // below, which turns into a performance liability if the
    // gen_checkcast folds up completely.
    return;
  }

  // Extract the array klass type
  int klass_offset = oopDesc::klass_offset_in_bytes();
  Node* p = basic_plus_adr( ary, ary, klass_offset );
  // p's type is array-of-OOPS plus klass_offset
  Node* array_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS));
  // Get the array klass
  const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr();

  // The type of array_klass is usually INexact array-of-oop. Heroically
  // cast array_klass to EXACT array and uncommon-trap if the cast fails.
  // Make constant out of the inexact array klass, but use it only if the cast
  // succeeds.
  bool always_see_exact_class = false;
  if (MonomorphicArrayCheck
      && !too_many_traps(Deoptimization::Reason_array_check)
      && !tak->klass_is_exact()
      && tak != TypeKlassPtr::OBJECT) {
    // Regarding the fourth condition in the if-statement from above:
    //
    // If the compiler has determined that the type of array 'ary' (represented
    // by 'array_klass') is java/lang/Object, the compiler must not assume that
    // the array 'ary' is monomorphic.
    //
    // If 'ary' were of type java/lang/Object, this arraystore would have to fail,
    // because it is not possible to perform an arraystore into an object that is not
    // a "proper" array.
    //
    // Therefore, let's obtain at runtime the type of 'ary' and check if we can still
    // successfully perform the store.
    //
    // The implementation reasons for the condition are the following:
    //
    // java/lang/Object is the superclass of all arrays, but it is represented by the VM
    // as an InstanceKlass. The checks generated by gen_checkcast() (see below) expect
    // 'array_klass' to be ObjArrayKlass, which can result in invalid memory accesses.
    //
    // See issue JDK-8057622 for details.

    always_see_exact_class = true;
    // (If no MDO at all, hope for the best, until a trap actually occurs.)

    // Make a constant out of the inexact array klass
    const TypeKlassPtr *extak = tak->cast_to_exactness(true)->is_klassptr();
    Node* con = makecon(extak);
    Node* cmp = _gvn.transform(new CmpPNode( array_klass, con ));
    Node* bol = _gvn.transform(new BoolNode( cmp, BoolTest::eq ));
    Node* ctrl= control();
    { BuildCutout unless(this, bol, PROB_MAX);
      uncommon_trap(Deoptimization::Reason_array_check,
                    Deoptimization::Action_maybe_recompile,
                    tak->klass());
    }
    if (stopped()) {          // MUST uncommon-trap?
      set_control(ctrl);      // Then Don't Do It, just fall into the normal checking
    } else {                  // Cast array klass to exactness:
      // Use the exact constant value we know it is.
      replace_in_map(array_klass,con);
      CompileLog* log = C->log();
      if (log != NULL) {
        log->elem("cast_up reason='monomorphic_array' from='%d' to='(exact)'",
                  log->identify(tak->klass()));
      }
      array_klass = con;      // Use cast value moving forward
    }
  }

  // Come here for polymorphic array klasses

  // Extract the array element class
  int element_klass_offset = in_bytes(ArrayKlass::element_klass_offset());

  Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset);
  // We are allowed to use the constant type only if cast succeeded. If always_see_exact_class is true,
  // we must set a control edge from the IfTrue node created by the uncommon_trap above to the
  // LoadKlassNode.
  Node* a_e_klass = _gvn.transform(LoadKlassNode::make(_gvn, always_see_exact_class ? control() : NULL,
                                                       immutable_memory(), p2, tak));

  if (target_is_valuetypearray) {
    ciKlass* target_elem_klass = gvn().type(a_e_klass)->is_klassptr()->klass();
    ciKlass* source_klass = gvn().type(obj)->is_valuetype()->value_klass();
    if (!target_elem_klass->equals(source_klass)) {
      Node* slow_ctl = type_check(a_e_klass, TypeKlassPtr::make(source_klass), 1.0);
      {
        PreserveJVMState pjvms(this);
        set_control(slow_ctl);
        builtin_throw(Deoptimization::Reason_class_check);
      }
    }
  } else {
    // Check (the hard way) and throw if not a subklass.
    // Result is ignored, we just need the CFG effects.
    gen_checkcast(obj, a_e_klass);
  }
}


void Parse::emit_guard_for_new(ciInstanceKlass* klass) {
  // Emit guarded new
  //   if (klass->_init_thread != current_thread ||
  //       klass->_init_state != being_initialized)
  //     uncommon_trap
  Node* cur_thread = _gvn.transform( new ThreadLocalNode() );
  Node* merge = new RegionNode(3);
  _gvn.set_type(merge, Type::CONTROL);
  Node* kls = makecon(TypeKlassPtr::make(klass));

  Node* init_thread_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_thread_offset()));
  Node* adr_node = basic_plus_adr(kls, kls, init_thread_offset);
  Node* init_thread = make_load(NULL, adr_node, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);
  Node *tst   = Bool( CmpP( init_thread, cur_thread), BoolTest::eq);
  IfNode* iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
  set_control(IfTrue(iff));
  merge->set_req(1, IfFalse(iff));

  Node* init_state_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_state_offset()));
  adr_node = basic_plus_adr(kls, kls, init_state_offset);
  // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
  // can generate code to load it as unsigned byte.
  Node* init_state = make_load(NULL, adr_node, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
  Node* being_init = _gvn.intcon(InstanceKlass::being_initialized);
  tst = Bool( CmpI( init_state, being_init), BoolTest::eq);
  iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
  set_control(IfTrue(iff));
  merge->set_req(2, IfFalse(iff));

  PreserveJVMState pjvms(this);
  record_for_igvn(merge);
  set_control(merge);

  uncommon_trap(Deoptimization::Reason_uninitialized,
                Deoptimization::Action_reinterpret,
                klass);
}


//------------------------------do_new-----------------------------------------
void Parse::do_new() {
  kill_dead_locals();

  bool will_link;
  ciInstanceKlass* klass = iter().get_klass(will_link)->as_instance_klass();
  assert(will_link, "_new: typeflow responsibility");

  // Should initialize, or throw an InstantiationError?
  if ((!klass->is_initialized() && !klass->is_being_initialized()) ||
      klass->is_abstract() || klass->is_interface() ||
      klass->name() == ciSymbol::java_lang_Class() ||
      iter().is_unresolved_klass()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  klass);
    return;
  }
  if (klass->is_being_initialized()) {
    emit_guard_for_new(klass);
  }

  Node* kls = makecon(TypeKlassPtr::make(klass));
  Node* obj = new_instance(kls);

  // Push resultant oop onto stack
  push(obj);

  // Keep track of whether opportunities exist for StringBuilder
  // optimizations.
  if (OptimizeStringConcat &&
      (klass == C->env()->StringBuilder_klass() ||
       klass == C->env()->StringBuffer_klass())) {
    C->set_has_stringbuilder(true);
  }

  // Keep track of boxed values for EliminateAutoBox optimizations.
  if (C->eliminate_boxing() && klass->is_box_klass()) {
    C->set_has_boxed_value(true);
  }
}

//------------------------------do_vdefault-------------------------------------
void Parse::do_vdefault() {
  bool will_link;
  ciValueKlass* vk = iter().get_klass(will_link)->as_value_klass();
  assert(will_link, "vdefault: typeflow responsibility");
  // Create and push a new default ValueTypeNode
  push(ValueTypeNode::make_default(_gvn, vk));
}

//------------------------------do_vwithfield-----------------------------------
void Parse::do_vwithfield() {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "vwithfield: typeflow responsibility");
  BasicType bt = field->layout_type();
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();
  Node* vt = pop();
  assert(vt->is_ValueType(), "value type expected here");

  ValueTypeNode* new_vt = vt->clone()->as_ValueType();
  new_vt->set_oop(_gvn.zerocon(T_VALUETYPE));
  // Find the field with the matching offset and replace its value
  int offset = field->offset();
  uint i = 0;
  for (; i < new_vt->field_count() && new_vt->field_offset(i) != offset; i++) {}
  assert(i < new_vt->field_count(), "field not found");
  new_vt->set_field_value(i, val);

  push(_gvn.transform(new_vt));
}

#ifndef PRODUCT
//------------------------------dump_map_adr_mem-------------------------------
// Debug dump of the mapping from address types to MergeMemNode indices.
void Parse::dump_map_adr_mem() const {
  tty->print_cr("--- Mapping from address types to memory Nodes ---");
  MergeMemNode *mem = map() == NULL ? NULL : (map()->memory()->is_MergeMem() ?
                                              map()->memory()->as_MergeMem() : NULL);
  for (uint i = 0; i < (uint)C->num_alias_types(); i++) {
    C->alias_type(i)->print_on(tty);
    tty->print("\t");
    // Node mapping, if any
    if (mem && i < mem->req() && mem->in(i) && mem->in(i) != mem->empty_memory()) {
      mem->in(i)->dump();
    } else {
      tty->cr();
    }
  }
}

#endif


//=============================================================================
//
// parser methods for profiling


//----------------------test_counter_against_threshold ------------------------
void Parse::test_counter_against_threshold(Node* cnt, int limit) {
  // Test the counter against the limit and uncommon trap if greater.

  // This code is largely copied from the range check code in
  // array_addressing()

  // Test invocation count vs threshold
  Node *threshold = makecon(TypeInt::make(limit));
  Node *chk   = _gvn.transform( new CmpUNode( cnt, threshold) );
  BoolTest::mask btest = BoolTest::lt;
  Node *tst   = _gvn.transform( new BoolNode( chk, btest) );
  // Branch to failure if threshold exceeded
  { BuildCutout unless(this, tst, PROB_ALWAYS);
    uncommon_trap(Deoptimization::Reason_age,
                  Deoptimization::Action_maybe_recompile);
  }
}

//----------------------increment_and_test_invocation_counter-------------------
void Parse::increment_and_test_invocation_counter(int limit) {
  if (!count_invocations()) return;

  // Get the Method* node.
  ciMethod* m = method();
  MethodCounters* counters_adr = m->ensure_method_counters();
  if (counters_adr == NULL) {
    C->record_failure("method counters allocation failed");
    return;
  }

  Node* ctrl = control();
  const TypePtr* adr_type = TypeRawPtr::make((address) counters_adr);
  Node *counters_node = makecon(adr_type);
  Node* adr_iic_node = basic_plus_adr(counters_node, counters_node,
    MethodCounters::interpreter_invocation_counter_offset_in_bytes());
  Node* cnt = make_load(ctrl, adr_iic_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);

  test_counter_against_threshold(cnt, limit);

  // Add one to the counter and store
  Node* incr = _gvn.transform(new AddINode(cnt, _gvn.intcon(1)));
  store_to_memory(ctrl, adr_iic_node, incr, T_INT, adr_type, MemNode::unordered);
}

//----------------------------method_data_addressing---------------------------
Node* Parse::method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  // Get offset within MethodData* of the data array
  ByteSize data_offset = MethodData::data_offset();

  // Get cell offset of the ProfileData within data array
  int cell_offset = md->dp_to_di(data->dp());

  // Add in counter_offset, the # of bytes into the ProfileData of counter or flag
  int offset = in_bytes(data_offset) + cell_offset + in_bytes(counter_offset);

  const TypePtr* adr_type = TypeMetadataPtr::make(md);
  Node* mdo = makecon(adr_type);
  Node* ptr = basic_plus_adr(mdo, mdo, offset);

  if (stride != 0) {
    Node* str = _gvn.MakeConX(stride);
    Node* scale = _gvn.transform( new MulXNode( idx, str ) );
    ptr   = _gvn.transform( new AddPNode( mdo, ptr, scale ) );
  }

  return ptr;
}

//--------------------------increment_md_counter_at----------------------------
void Parse::increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  Node* adr_node = method_data_addressing(md, data, counter_offset, idx, stride);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt  = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
  Node* incr = _gvn.transform(new AddINode(cnt, _gvn.intcon(DataLayout::counter_increment)));
  store_to_memory(NULL, adr_node, incr, T_INT, adr_type, MemNode::unordered);
}

//--------------------------test_for_osr_md_counter_at-------------------------
void Parse::test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, int limit) {
  Node* adr_node = method_data_addressing(md, data, counter_offset);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);

  test_counter_against_threshold(cnt, limit);
}

//-------------------------------set_md_flag_at--------------------------------
void Parse::set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant) {
  Node* adr_node = method_data_addressing(md, data, DataLayout::flags_offset());

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* flags = make_load(NULL, adr_node, TypeInt::BYTE, T_BYTE, adr_type, MemNode::unordered);
  Node* incr  = _gvn.transform(new OrINode(flags, _gvn.intcon(flag_constant)));
  store_to_memory(NULL, adr_node, incr, T_BYTE, adr_type, MemNode::unordered);
}
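
// Note on the helpers above: method_data_addressing() computes
//   MDO + data_offset + dp_to_di(data->dp()) + counter_offset [+ idx * stride]
// and the increment/test/flag helpers load and store through that address
// using plain unordered memory ops, so concurrent updates can lose
// increments; profiling counters are approximate by design.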

//----------------------------profile_taken_branch-----------------------------
void Parse::profile_taken_branch(int target_bci, bool force_update) {
  // This is a potential osr_site if we have a backedge.
  int cur_bci = bci();
  bool osr_site =
    (target_bci <= cur_bci) && count_invocations() && UseOnStackReplacement;

  // If we are going to OSR, restart at the target bytecode.
  set_bci(target_bci);

  // To do: factor out the limit calculations below. These duplicate
  // the similar limit calculations in the interpreter.

  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(cur_bci);
    assert(data->is_JumpData(), "need JumpData for taken branch");
    increment_md_counter_at(md, data, JumpData::taken_offset());
  }

  // In the new tiered system this is all we need to do. In the old
  // (c2 based) tiered system we must do the code below.
#ifndef TIERED
  if (method_data_update()) {
    ciMethodData* md = method()->method_data();
    if (osr_site) {
      ciProfileData* data = md->bci_to_data(cur_bci);
      int limit = (CompileThreshold
                   * (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100;
      test_for_osr_md_counter_at(md, data, JumpData::taken_offset(), limit);
    }
  } else {
    // With method data update off, use the invocation counter to trigger an
    // OSR compilation, as done in the interpreter.
    if (osr_site) {
      int limit = (CompileThreshold * OnStackReplacePercentage) / 100;
      increment_and_test_invocation_counter(limit);
    }
  }
#endif // TIERED

  // Restore the original bytecode.
  set_bci(cur_bci);
}

//--------------------------profile_not_taken_branch---------------------------
void Parse::profile_not_taken_branch(bool force_update) {

  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(bci());
    assert(data->is_BranchData(), "need BranchData for not taken branch");
    increment_md_counter_at(md, data, BranchData::not_taken_offset());
  }

}

//---------------------------------profile_call--------------------------------
void Parse::profile_call(Node* receiver) {
  if (!method_data_update()) return;

  switch (bc()) {
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    profile_receiver_type(receiver);
    break;
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokespecial:
    profile_generic_call();
    break;
  default: fatal("unexpected call bytecode");
  }
}

//------------------------------profile_generic_call---------------------------
void Parse::profile_generic_call() {
  assert(method_data_update(), "must be generating profile code");

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_CounterData(), "need CounterData for generic call");
  increment_md_counter_at(md, data, CounterData::count_offset());
}

//-----------------------------profile_receiver_type---------------------------
void Parse::profile_receiver_type(Node* receiver) {
  assert(method_data_update(), "must be generating profile code");
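
  // ReceiverTypeData lays out a generic invocation count followed by
  // TypeProfileWidth (receiver klass, count) row pairs; the runtime leaf
  // call emitted below updates the matching row for the observed receiver,
  // claims a free one, or falls back to the generic count.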

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData here");

  // Skip if we aren't tracking receivers
  if (TypeProfileWidth < 1) {
    increment_md_counter_at(md, data, CounterData::count_offset());
    return;
  }
  ciReceiverTypeData* rdata = (ciReceiverTypeData*)data->as_ReceiverTypeData();

  Node* method_data = method_data_addressing(md, rdata, in_ByteSize(0));

  // Using an adr_type of TypePtr::BOTTOM to work around anti-dep problems.
  // A better solution might be to use TypeRawPtr::BOTTOM with RC_NARROW_MEM.
  make_runtime_call(RC_LEAF, OptoRuntime::profile_receiver_type_Type(),
                    CAST_FROM_FN_PTR(address,
                                     OptoRuntime::profile_receiver_type_C),
                    "profile_receiver_type_C",
                    TypePtr::BOTTOM,
                    method_data, receiver);
}

//---------------------------------profile_ret---------------------------------
void Parse::profile_ret(int target_bci) {
  if (!method_data_update()) return;

  // Skip if we aren't tracking ret targets
  if (TypeProfileWidth < 1) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_RetData(), "need RetData for ret");
  ciRetData* ret_data = (ciRetData*)data->as_RetData();

  // Check whether the target_bci is already in the table
  uint row;
  bool table_full = true;
  for (row = 0; row < ret_data->row_limit(); row++) {
    int key = ret_data->bci(row);
    table_full &= (key != RetData::no_bci);
    if (key == target_bci) break;
  }

  if (row >= ret_data->row_limit()) {
    // The target_bci was not found in the table.
    if (!table_full) {
      // XXX: Make slow call to update RetData
    }
    return;
  }

  // The target_bci is already in the table
  increment_md_counter_at(md, data, RetData::bci_count_offset(row));
}

//--------------------------profile_null_checkcast----------------------------
void Parse::profile_null_checkcast() {
  // Set the null-seen flag, done in conjunction with the usual null check. We
  // never unset the flag, so this is a one-way switch.
  if (!method_data_update()) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_BitData(), "need BitData for checkcast");
  set_md_flag_at(md, data, BitData::null_seen_byte_constant());
}

//-----------------------------profile_switch_case-----------------------------
void Parse::profile_switch_case(int table_index) {
  if (!method_data_update()) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");

  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_MultiBranchData(), "need MultiBranchData for switch case");
  if (table_index >= 0) {
    increment_md_counter_at(md, data, MultiBranchData::case_count_offset(table_index));
  } else {
    increment_md_counter_at(md, data, MultiBranchData::default_count_offset());
  }
}