/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciValueKlass.hpp"
#include "classfile/systemDictionary.hpp"
#include "compiler/compileLog.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/valueArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/valuetypenode.hpp"
#include "runtime/sharedRuntime.hpp"

//------------------------------make_dtrace_method_entry_exit ----------------
// Dtrace -- record entry or exit of a method if compiled with dtrace support
void GraphKit::make_dtrace_method_entry_exit(ciMethod* method, bool is_entry) {
  const TypeFunc *call_type    = OptoRuntime::dtrace_method_entry_exit_Type();
  address         call_address = is_entry ? CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry) :
                                            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit);
  const char     *call_name    = is_entry ? "dtrace_method_entry" : "dtrace_method_exit";

  // Get base of thread-local storage area
  Node* thread = _gvn.transform( new ThreadLocalNode() );

  // Get method
  const TypePtr* method_type = TypeMetadataPtr::make(method);
  Node *method_node = _gvn.transform(ConNode::make(method_type));

  kill_dead_locals();

  // For some reason, this call reads only raw memory.
  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
  make_runtime_call(RC_LEAF | RC_NARROW_MEM,
                    call_type, call_address,
                    call_name, raw_adr_type,
                    thread, method_node);
}


//=============================================================================
//------------------------------do_checkcast-----------------------------------
void Parse::do_checkcast() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);
  bool never_null = iter().is_klass_never_null();

  Node *obj = peek();

  // Throw uncommon trap if class is not loaded or the value we are casting
  // _from_ is not loaded, and value is not null.  If the value _is_ NULL,
  // then the checkcast does nothing.
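  //
  // Illustrative Java-level example (hypothetical class name, not from the
  // source): for
  //   Unloaded u = (Unloaded) obj;
  // where Unloaded has not been loaded yet, a null obj must still pass the
  // cast, so the parser emits a null_assert and only the non-null path traps.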
  const TypeOopPtr *tp = _gvn.type(obj)->isa_oopptr();
  if (!will_link || (tp && tp->klass() && !tp->klass()->is_loaded())) {
    assert(!never_null, "Null-free value type should be loaded");
    if (C->log() != NULL) {
      if (!will_link) {
        C->log()->elem("assert_null reason='checkcast' klass='%d'",
                       C->log()->identify(klass));
      }
      if (tp && tp->klass() && !tp->klass()->is_loaded()) {
        // %%% Cannot happen?
        C->log()->elem("assert_null reason='checkcast source' klass='%d'",
                       C->log()->identify(tp->klass()));
      }
    }
    null_assert(obj);
    assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
    if (!stopped()) {
      profile_null_checkcast();
    }
    return;
  }

  Node* res = gen_checkcast(obj, makecon(TypeKlassPtr::make(klass)), NULL, never_null);
  if (stopped()) {
    return;
  }

  // Pop from stack AFTER gen_checkcast because it can uncommon trap and
  // the debug info has to be correct.
  pop();
  push(res);
}


//------------------------------do_instanceof----------------------------------
void Parse::do_instanceof() {
  if (stopped())  return;
  // We would like to return false if class is not loaded, emitting a
  // dependency, but Java requires instanceof to load its operand.

  // Throw uncommon trap if class is not loaded
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  if (!will_link) {
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='instanceof' klass='%d'",
                     C->log()->identify(klass));
    }
    null_assert(peek());
    assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
    if (!stopped()) {
      // The object is now known to be null.
      // Shortcut the effect of gen_instanceof and return "false" directly.
      pop();                   // pop the null
      push(_gvn.intcon(0));    // push false answer
    }
    return;
  }

  // Push the bool result back on stack
  Node* res = gen_instanceof(peek(), makecon(TypeKlassPtr::make(klass)), true);

  // Pop from stack AFTER gen_instanceof because it can uncommon trap.
  pop();
  push(res);
}

//------------------------------array_store_check------------------------------
// pull array from stack and check that the store is valid
Node* Parse::array_store_check() {
  // Shorthand access to array store elements without popping them.
  Node *obj = peek(0);
  Node *idx = peek(1);
  Node *ary = peek(2);

  if (_gvn.type(obj) == TypePtr::NULL_PTR) {
    // There's never a type check on null values.
    // This cutout lets us avoid the uncommon_trap(Reason_array_check)
    // below, which turns into a performance liability if the
    // gen_checkcast folds up completely.
    return obj;
  }

  // Extract the array klass type
  Node* array_klass = load_object_klass(ary);
  // Get the array klass
  const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr();

  // The type of array_klass is usually INexact array-of-oop.  Heroically
  // cast array_klass to EXACT array and uncommon-trap if the cast fails.
  // Make constant out of the inexact array klass, but use it only if the cast
  // succeeds.
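  //
  // Rough sketch of the cutout subgraph built below (illustrative only):
  //
  //   array_klass --+
  //                 CmpP --> Bool(eq) --> If --+--> taken:     use exact klass constant
  //   exact con  ---+                          +--> not taken: uncommon_trap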
  bool always_see_exact_class = false;
  if (MonomorphicArrayCheck
      && !tak->klass_is_exact()) {
    // Regarding the 'tak != TypeKlassPtr::OBJECT' condition in the last
    // else-if branch below:
    //
    // If the compiler has determined that the type of array 'ary' (represented
    // by 'array_klass') is java/lang/Object, the compiler must not assume that
    // the array 'ary' is monomorphic.
    //
    // If 'ary' were of type java/lang/Object, this arraystore would have to fail,
    // because it is not possible to perform an arraystore into an object that is not
    // a "proper" array.
    //
    // Therefore, let's obtain at runtime the type of 'ary' and check if we can still
    // successfully perform the store.
    //
    // The implementation reasons for the condition are the following:
    //
    // java/lang/Object is the superclass of all arrays, but it is represented by the VM
    // as an InstanceKlass. The checks generated by gen_checkcast() (see below) expect
    // 'array_klass' to be ObjArrayKlass, which can result in invalid memory accesses.
    //
    // See issue JDK-8057622 for details.

    // (If no MDO at all, hope for the best, until a trap actually occurs.)

    // Make a constant out of the inexact array klass
    const TypeKlassPtr *extak = NULL;
    const TypeOopPtr* ary_t = _gvn.type(ary)->is_oopptr();
    ciKlass* ary_spec = ary_t->speculative_type();
    Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
    // Try to cast the array to an exact type from profile data.  First
    // check the speculative type.
    if (ary_spec != NULL && !too_many_traps(Deoptimization::Reason_speculate_class_check)) {
      extak = TypeKlassPtr::make(ary_spec);
      reason = Deoptimization::Reason_speculate_class_check;
    } else if (UseArrayLoadStoreProfile) {
      // No speculative type: check profile data at this bci.
      reason = Deoptimization::Reason_class_check;
      if (!too_many_traps(reason)) {
        ciKlass* array_type = NULL;
        ciKlass* element_type = NULL;
        ProfilePtrKind element_ptr = ProfileMaybeNull;
        bool flat_array = true;
        bool null_free_array = true;
        method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
        if (array_type != NULL) {
          extak = TypeKlassPtr::make(array_type);
        }
      }
    } else if (!too_many_traps(Deoptimization::Reason_class_check) && tak != TypeKlassPtr::OBJECT) {
      extak = tak->cast_to_exactness(true)->is_klassptr();
    }
    if (extak != NULL) {
      always_see_exact_class = true;
      Node* con = makecon(extak);
      Node* cmp = _gvn.transform(new CmpPNode( array_klass, con ));
      Node* bol = _gvn.transform(new BoolNode( cmp, BoolTest::eq ));
      Node* ctrl= control();
      { BuildCutout unless(this, bol, PROB_MAX);
        uncommon_trap(reason,
                      Deoptimization::Action_maybe_recompile,
                      tak->klass());
      }
      if (stopped()) {          // MUST uncommon-trap?
        set_control(ctrl);      // Then Don't Do It, just fall into the normal checking
      } else {                  // Cast array klass to exactness:
        // Use the exact constant value we know it is.
        replace_in_map(array_klass, con);
        Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, extak->as_instance_type()));
        replace_in_map(ary, cast);

        CompileLog* log = C->log();
        if (log != NULL) {
          log->elem("cast_up reason='monomorphic_array' from='%d' to='(exact)'",
                    log->identify(tak->klass()));
        }
        array_klass = con;      // Use cast value moving forward
      }
    }
  }

  // Come here for polymorphic array klasses

  // Extract the array element class
  int element_klass_offset = in_bytes(ArrayKlass::element_klass_offset());

  Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset);
  // We are allowed to use the constant type only if cast succeeded. If always_see_exact_class is true,
  // we must set a control edge from the IfTrue node created by the uncommon_trap above to the
  // LoadKlassNode.
  Node* a_e_klass = _gvn.transform(LoadKlassNode::make(_gvn, always_see_exact_class ? control() : NULL,
                                                       immutable_memory(), p2, tak));

  // Handle value type arrays
  const Type* elemtype = _gvn.type(ary)->is_aryptr()->elem();
  if (elemtype->isa_valuetype() != NULL || elemtype->is_valuetypeptr()) {
    // We statically know that this is a value type array, use precise klass ptr
    a_e_klass = makecon(TypeKlassPtr::make(elemtype->value_klass()));
  }

  // Check (the hard way) and throw if not a subklass.
  return gen_checkcast(obj, a_e_klass);
}


//------------------------------do_new-----------------------------------------
void Parse::do_new() {
  kill_dead_locals();

  bool will_link;
  ciInstanceKlass* klass = iter().get_klass(will_link)->as_instance_klass();
  assert(will_link, "_new: typeflow responsibility");

  // Should throw an InstantiationError?
  if (klass->is_abstract() || klass->is_interface() ||
      klass->name() == ciSymbol::java_lang_Class() ||
      iter().is_unresolved_klass()) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none,
                  klass);
    return;
  }

  if (C->needs_clinit_barrier(klass, method())) {
    clinit_barrier(klass, method());
    if (stopped())  return;
  }

  Node* kls = makecon(TypeKlassPtr::make(klass));
  Node* obj = new_instance(kls);

  // Push resultant oop onto stack
  push(obj);

  // Keep track of whether opportunities exist for StringBuilder
  // optimizations.
  if (OptimizeStringConcat &&
      (klass == C->env()->StringBuilder_klass() ||
       klass == C->env()->StringBuffer_klass())) {
    C->set_has_stringbuilder(true);
  }

  // Keep track of boxed values for EliminateAutoBox optimizations.
  if (C->eliminate_boxing() && klass->is_box_klass()) {
    C->set_has_boxed_value(true);
  }
}

//------------------------------do_defaultvalue---------------------------------
void Parse::do_defaultvalue() {
  bool will_link;
  ciValueKlass* vk = iter().get_klass(will_link)->as_value_klass();
  assert(will_link, "defaultvalue: typeflow responsibility");

  // Should throw an InstantiationError?
  if (iter().is_unresolved_klass()) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none,
                  vk);
    return;
  }

  if (C->needs_clinit_barrier(vk, method())) {
    clinit_barrier(vk, method());
    if (stopped())  return;
  }

  ValueTypeNode* vt = ValueTypeNode::make_default(_gvn, vk);
  if (vk->is_scalarizable()) {
    push(vt);
  } else {
    push(vt->get_oop());
  }
}

//------------------------------do_withfield------------------------------------
void Parse::do_withfield() {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "withfield: typeflow responsibility");
  BasicType bt = field->layout_type();
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();
  ciValueKlass* holder_klass = field->holder()->as_value_klass();
  Node* holder = pop();

  if (!holder->is_ValueType()) {
    // Null check and scalarize value type holder
    inc_sp(2);
    holder = null_check(holder);
    dec_sp(2);
    if (stopped())  return;
    holder = ValueTypeNode::make_from_oop(this, holder, holder_klass);
  }
  if (!val->is_ValueType() && field->is_flattenable()) {
    // Null check and scalarize value type field value
    inc_sp(2);
    val = null_check(val);
    dec_sp(2);
    if (stopped())  return;
    val = ValueTypeNode::make_from_oop(this, val, gvn().type(val)->value_klass());
  } else if (val->is_ValueType() && !field->is_flattenable()) {
    // Non-flattenable field should not be scalarized
    // Re-execute withfield if buffering triggers deoptimization
    PreserveReexecuteState preexecs(this);
    jvms()->set_should_reexecute(true);
    inc_sp(2);
    val = ValueTypePtrNode::make_from_value_type(this, val->as_ValueType());
  }

  // Clone the value type node and set the new field value
  ValueTypeNode* new_vt = holder->clone()->as_ValueType();
  new_vt->set_oop(_gvn.zerocon(T_VALUETYPE));
  gvn().set_type(new_vt, new_vt->bottom_type());
  new_vt->set_field_value_by_offset(field->offset(), val);

  if (holder_klass->is_scalarizable()) {
    push(_gvn.transform(new_vt));
  } else {
    // Re-execute withfield if buffering triggers deoptimization
    PreserveReexecuteState preexecs(this);
    jvms()->set_should_reexecute(true);
    inc_sp(2);
    push(new_vt->allocate(this)->get_oop());
  }
}

#ifndef PRODUCT
//------------------------------dump_map_adr_mem-------------------------------
// Debug dump of the mapping from address types to MergeMemNode indices.
void Parse::dump_map_adr_mem() const {
  tty->print_cr("--- Mapping from address types to memory Nodes ---");
  MergeMemNode *mem = map() == NULL ? NULL : (map()->memory()->is_MergeMem() ?
                                      map()->memory()->as_MergeMem() : NULL);
  for (uint i = 0; i < (uint)C->num_alias_types(); i++) {
    C->alias_type(i)->print_on(tty);
    tty->print("\t");
    // Node mapping, if any
    if (mem && i < mem->req() && mem->in(i) && mem->in(i) != mem->empty_memory()) {
      mem->in(i)->dump();
    } else {
      tty->cr();
    }
  }
}

#endif


//=============================================================================
//
// parser methods for profiling


//----------------------test_counter_against_threshold ------------------------
void Parse::test_counter_against_threshold(Node* cnt, int limit) {
  // Test the counter against the limit and uncommon trap if greater.
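  //
  // Shape of the subgraph built below (illustrative only):
  //
  //   cnt ---+
  //          CmpU --> Bool(lt) --> BuildCutout --+--> likely:   fall through
  //   limit -+                                   +--> unlikely: uncommon_trap(Reason_age)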

  // This code is largely copied from the range check code in
  // array_addressing()

  // Test invocation count vs threshold
  Node *threshold = makecon(TypeInt::make(limit));
  Node *chk   = _gvn.transform( new CmpUNode( cnt, threshold) );
  BoolTest::mask btest = BoolTest::lt;
  Node *tst   = _gvn.transform( new BoolNode( chk, btest) );
  // Branch to failure if threshold exceeded
  { BuildCutout unless(this, tst, PROB_ALWAYS);
    uncommon_trap(Deoptimization::Reason_age,
                  Deoptimization::Action_maybe_recompile);
  }
}

//----------------------increment_and_test_invocation_counter-------------------
void Parse::increment_and_test_invocation_counter(int limit) {
  if (!count_invocations()) return;

  // Get the Method* node.
  ciMethod* m = method();
  MethodCounters* counters_adr = m->ensure_method_counters();
  if (counters_adr == NULL) {
    C->record_failure("method counters allocation failed");
    return;
  }

  Node* ctrl = control();
  const TypePtr* adr_type = TypeRawPtr::make((address) counters_adr);
  Node *counters_node = makecon(adr_type);
  Node* adr_iic_node = basic_plus_adr(counters_node, counters_node,
    MethodCounters::interpreter_invocation_counter_offset_in_bytes());
  Node* cnt = make_load(ctrl, adr_iic_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);

  test_counter_against_threshold(cnt, limit);

  // Add one to the counter and store
  Node* incr = _gvn.transform(new AddINode(cnt, _gvn.intcon(1)));
  store_to_memory(ctrl, adr_iic_node, incr, T_INT, adr_type, MemNode::unordered);
}

//----------------------------method_data_addressing---------------------------
Node* Parse::method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  // Get offset within MethodData* of the data array
  ByteSize data_offset = MethodData::data_offset();

  // Get cell offset of the ProfileData within data array
  int cell_offset = md->dp_to_di(data->dp());

  // Add in counter_offset, the # of bytes into the ProfileData of counter or flag
  int offset = in_bytes(data_offset) + cell_offset + in_bytes(counter_offset);

  const TypePtr* adr_type = TypeMetadataPtr::make(md);
  Node* mdo = makecon(adr_type);
  Node* ptr = basic_plus_adr(mdo, mdo, offset);

  if (stride != 0) {
    Node* str = _gvn.MakeConX(stride);
    Node* scale = _gvn.transform( new MulXNode( idx, str ) );
    ptr   = _gvn.transform( new AddPNode( mdo, ptr, scale ) );
  }

  return ptr;
}

//--------------------------increment_md_counter_at----------------------------
void Parse::increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  Node* adr_node = method_data_addressing(md, data, counter_offset, idx, stride);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
  Node* incr = _gvn.transform(new AddINode(cnt, _gvn.intcon(DataLayout::counter_increment)));
  store_to_memory(NULL, adr_node, incr, T_INT, adr_type, MemNode::unordered);
}

//--------------------------test_for_osr_md_counter_at-------------------------
void Parse::test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, int limit) {
  Node* adr_node = method_data_addressing(md, data, counter_offset);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);

  test_counter_against_threshold(cnt, limit);
}

//-------------------------------set_md_flag_at--------------------------------
void Parse::set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant) {
  Node* adr_node = method_data_addressing(md, data, DataLayout::flags_offset());

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* flags = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
  Node* incr = _gvn.transform(new OrINode(flags, _gvn.intcon(flag_constant)));
  store_to_memory(NULL, adr_node, incr, T_INT, adr_type, MemNode::unordered);
}

//----------------------------profile_taken_branch-----------------------------
void Parse::profile_taken_branch(int target_bci, bool force_update) {
  // This is a potential osr_site if we have a backedge.
  int cur_bci = bci();
  bool osr_site =
    (target_bci <= cur_bci) && count_invocations() && UseOnStackReplacement;

  // If we are going to OSR, restart at the target bytecode.
  set_bci(target_bci);

  // To do: factor out the limit calculations below. These duplicate
  // the similar limit calculations in the interpreter.

  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(cur_bci);
    assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch");
    increment_md_counter_at(md, data, JumpData::taken_offset());
  }

  // In the new tiered system this is all we need to do. In the old
  // (c2 based) tiered system we must do the code below.
#ifndef TIERED
  if (method_data_update()) {
    ciMethodData* md = method()->method_data();
    if (osr_site) {
      ciProfileData* data = md->bci_to_data(cur_bci);
      assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch");
      int limit = (int)((int64_t)CompileThreshold
                  * (OnStackReplacePercentage - InterpreterProfilePercentage) / 100);
      test_for_osr_md_counter_at(md, data, JumpData::taken_offset(), limit);
    }
  } else {
    // With method data update off, use the invocation counter to trigger an
    // OSR compilation, as done in the interpreter.
    if (osr_site) {
      int limit = (int)((int64_t)CompileThreshold * OnStackReplacePercentage / 100);
      increment_and_test_invocation_counter(limit);
    }
  }
#endif // TIERED

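  // Worked example of the limit arithmetic above, assuming the common default
  // flag values CompileThreshold=10000, OnStackReplacePercentage=140, and
  // InterpreterProfilePercentage=33 (actual values are platform- and
  // flag-dependent):
  //   with method data update:    10000 * (140 - 33) / 100 = 10700
  //   without method data update: 10000 * 140 / 100        = 14000
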
  // Restore the original bytecode.
  set_bci(cur_bci);
}

//--------------------------profile_not_taken_branch---------------------------
void Parse::profile_not_taken_branch(bool force_update) {

  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(bci());
    assert(data != NULL && data->is_BranchData(), "need BranchData for not taken branch");
    increment_md_counter_at(md, data, BranchData::not_taken_offset());
  }

}

//---------------------------------profile_call--------------------------------
void Parse::profile_call(Node* receiver) {
  if (!method_data_update()) return;

  switch (bc()) {
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    profile_receiver_type(receiver);
    break;
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokespecial:
    profile_generic_call();
    break;
  default: fatal("unexpected call bytecode");
  }
}

//------------------------------profile_generic_call---------------------------
void Parse::profile_generic_call() {
  assert(method_data_update(), "must be generating profile code");

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data != NULL && data->is_CounterData(), "need CounterData for generic call");
  increment_md_counter_at(md, data, CounterData::count_offset());
}

//-----------------------------profile_receiver_type---------------------------
void Parse::profile_receiver_type(Node* receiver) {
  assert(method_data_update(), "must be generating profile code");

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data != NULL && data->is_ReceiverTypeData(), "need ReceiverTypeData here");

  // Skip if we aren't tracking receivers
  if (TypeProfileWidth < 1) {
    increment_md_counter_at(md, data, CounterData::count_offset());
    return;
  }
  ciReceiverTypeData* rdata = (ciReceiverTypeData*)data->as_ReceiverTypeData();

  Node* method_data = method_data_addressing(md, rdata, in_ByteSize(0));

  // Using an adr_type of TypePtr::BOTTOM to work around anti-dep problems.
  // A better solution might be to use TypeRawPtr::BOTTOM with RC_NARROW_MEM.
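  // (TypePtr::BOTTOM makes the call alias with all of memory, so unrelated
  //  loads and stores cannot be scheduled across it; a narrower raw-memory
  //  slice would be cheaper but depends on fixing the anti-dependence issue.)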
  make_runtime_call(RC_LEAF, OptoRuntime::profile_receiver_type_Type(),
                    CAST_FROM_FN_PTR(address,
                                     OptoRuntime::profile_receiver_type_C),
                    "profile_receiver_type_C",
                    TypePtr::BOTTOM,
                    method_data, receiver);
}

//---------------------------------profile_ret---------------------------------
void Parse::profile_ret(int target_bci) {
  if (!method_data_update()) return;

  // Skip if we aren't tracking ret targets
  if (TypeProfileWidth < 1) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data != NULL && data->is_RetData(), "need RetData for ret");
  ciRetData* ret_data = (ciRetData*)data->as_RetData();

  // Check whether the target_bci is already in the table
  uint row;
  bool table_full = true;
  for (row = 0; row < ret_data->row_limit(); row++) {
    int key = ret_data->bci(row);
    table_full &= (key != RetData::no_bci);
    if (key == target_bci) break;
  }

  if (row >= ret_data->row_limit()) {
    // The target_bci was not found in the table.
    if (!table_full) {
      // XXX: Make slow call to update RetData
    }
    return;
  }

  // The target_bci is already in the table; bump its counter row.
  increment_md_counter_at(md, data, RetData::bci_count_offset(row));
}

//--------------------------profile_null_checkcast----------------------------
void Parse::profile_null_checkcast() {
  // Set the null-seen flag, done in conjunction with the usual null check. We
  // never unset the flag, so this is a one-way switch.
  if (!method_data_update()) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data != NULL && data->is_BitData(), "need BitData for checkcast");
  set_md_flag_at(md, data, BitData::null_seen_byte_constant());
}

//-----------------------------profile_switch_case-----------------------------
void Parse::profile_switch_case(int table_index) {
  if (!method_data_update()) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");

  ciProfileData* data = md->bci_to_data(bci());
  assert(data != NULL && data->is_MultiBranchData(), "need MultiBranchData for switch case");
  if (table_index >= 0) {
    increment_md_counter_at(md, data, MultiBranchData::case_count_offset(table_index));
  } else {
    increment_md_counter_at(md, data, MultiBranchData::default_count_offset());
  }
}