/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciValueKlass.hpp"
#include "classfile/systemDictionary.hpp"
#include "compiler/compileLog.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/valueArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/valuetypenode.hpp"
#include "runtime/sharedRuntime.hpp"

//------------------------------make_dtrace_method_entry_exit ----------------
// Dtrace -- record entry or exit of a method if compiled with dtrace support
void GraphKit::make_dtrace_method_entry_exit(ciMethod* method, bool is_entry) {
  const TypeFunc *call_type    = OptoRuntime::dtrace_method_entry_exit_Type();
  address         call_address = is_entry ? CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry) :
                                            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit);
  const char     *call_name    = is_entry ? "dtrace_method_entry" : "dtrace_method_exit";

  // Get base of thread-local storage area
  Node* thread = _gvn.transform( new ThreadLocalNode() );

  // Get method
  const TypePtr* method_type = TypeMetadataPtr::make(method);
  Node *method_node = _gvn.transform(ConNode::make(method_type));

  kill_dead_locals();

  // For some reason, this call reads only raw memory.
  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
  make_runtime_call(RC_LEAF | RC_NARROW_MEM,
                    call_type, call_address,
                    call_name, raw_adr_type,
                    thread, method_node);
}


//=============================================================================
//------------------------------do_checkcast-----------------------------------
void Parse::do_checkcast() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);
  bool never_null = iter().get_never_null();

  Node *obj = peek();

  // Throw uncommon trap if class is not loaded or the value we are casting
  // _from_ is not loaded, and value is not null.  If the value _is_ NULL,
  // then the checkcast does nothing.
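  // (When the type check cannot be emitted, null_assert below speculates
  // that the operand is null and deoptimizes if it turns out not to be.)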
  const TypeOopPtr *tp = _gvn.type(obj)->isa_oopptr();
  if (!will_link || (tp && tp->klass() && !tp->klass()->is_loaded())) {
    if (C->log() != NULL) {
      if (!will_link) {
        C->log()->elem("assert_null reason='checkcast' klass='%d'",
                       C->log()->identify(klass));
      }
      if (tp && tp->klass() && !tp->klass()->is_loaded()) {
        // %%% Cannot happen?
        C->log()->elem("assert_null reason='checkcast source' klass='%d'",
                       C->log()->identify(tp->klass()));
      }
    }
    null_assert(obj);
    assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
    if (!stopped()) {
      profile_null_checkcast();
    }
    return;
  }

  Node* res = gen_checkcast(obj, makecon(TypeKlassPtr::make(klass)), NULL, never_null);
  if (stopped()) {
    return;
  }

  // Pop from stack AFTER gen_checkcast because it can uncommon trap and
  // the debug info has to be correct.
  pop();
  push(res);
}


//------------------------------do_instanceof----------------------------------
void Parse::do_instanceof() {
  if (stopped())  return;
  // We would like to return false if class is not loaded, emitting a
  // dependency, but Java requires instanceof to load its operand.

  // Throw uncommon trap if class is not loaded
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  if (!will_link) {
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='instanceof' klass='%d'",
                     C->log()->identify(klass));
    }
    null_assert(peek());
    assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
    if (!stopped()) {
      // The object is now known to be null.
      // Shortcut the effect of gen_instanceof and return "false" directly.
      pop();                   // pop the null
      push(_gvn.intcon(0));    // push false answer
    }
    return;
  }

  // Push the bool result back on stack
  Node* res = gen_instanceof(peek(), makecon(TypeKlassPtr::make(klass)), true);

  // Pop from stack AFTER gen_instanceof because it can uncommon trap.
  pop();
  push(res);
}

//------------------------------array_store_check------------------------------
// pull array from stack and check that the store is valid
Node* Parse::array_store_check() {
  // Shorthand access to array store elements without popping them.
  Node *obj = peek(0);
  Node *idx = peek(1);
  Node *ary = peek(2);

  const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
  const Type* elemtype = ary_t->elem();
  const TypeOopPtr* elemptr = elemtype->make_oopptr();
  bool is_value_array = elemtype->isa_valuetype() != NULL || (elemptr != NULL && elemptr->is_valuetypeptr());

  if (_gvn.type(obj) == TypePtr::NULL_PTR) {
    // There's never a type check on null values.
    // This cutout lets us avoid the uncommon_trap(Reason_array_check)
    // below, which turns into a performance liability if the
    // gen_checkcast folds up completely.
    return obj;
  }

  // Extract the array klass type
  Node* array_klass = load_object_klass(ary);
  // Get the array klass
  const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr();

  // The type of array_klass is usually INexact array-of-oop.  Heroically
  // cast array_klass to EXACT array and uncommon-trap if the cast fails.
  // Make constant out of the inexact array klass, but use it only if the cast
  // succeeds.
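  // In pseudocode, the guard emitted below is roughly:
  //   if (array_klass != ExactKlassConstant)  uncommon_trap(Reason_array_check);
  //   // ... from here on, array_klass is known to be the exact constant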
  bool always_see_exact_class = false;
  if (MonomorphicArrayCheck
      && !too_many_traps(Deoptimization::Reason_array_check)
      && !tak->klass_is_exact()
      && tak != TypeKlassPtr::OBJECT) {
      // Regarding the fourth condition in the if-statement from above:
      //
      // If the compiler has determined that the type of array 'ary' (represented
      // by 'array_klass') is java/lang/Object, the compiler must not assume that
      // the array 'ary' is monomorphic.
      //
      // If 'ary' were of type java/lang/Object, this arraystore would have to fail,
      // because it is not possible to perform an arraystore into an object that is not
      // a "proper" array.
      //
      // Therefore, let's obtain at runtime the type of 'ary' and check if we can still
      // successfully perform the store.
      //
      // The implementation reasons for the condition are the following:
      //
      // java/lang/Object is the superclass of all arrays, but it is represented by the VM
      // as an InstanceKlass. The checks generated by gen_checkcast() (see below) expect
      // 'array_klass' to be ObjArrayKlass, which can result in invalid memory accesses.
      //
      // See issue JDK-8057622 for details.

    always_see_exact_class = true;
    // (If no MDO at all, hope for the best, until a trap actually occurs.)

    // Make a constant out of the inexact array klass
    const TypeKlassPtr *extak = tak->cast_to_exactness(true)->is_klassptr();
    Node* con = makecon(extak);
    Node* cmp = _gvn.transform(new CmpPNode( array_klass, con ));
    Node* bol = _gvn.transform(new BoolNode( cmp, BoolTest::eq ));
    Node* ctrl= control();
    { BuildCutout unless(this, bol, PROB_MAX);
      uncommon_trap(Deoptimization::Reason_array_check,
                    Deoptimization::Action_maybe_recompile,
                    tak->klass());
    }
    if (stopped()) {          // MUST uncommon-trap?
      set_control(ctrl);      // Then Don't Do It, just fall into the normal checking
    } else {                  // Cast array klass to exactness:
      // Use the exact constant value we know it is.
      replace_in_map(array_klass,con);
      CompileLog* log = C->log();
      if (log != NULL) {
        log->elem("cast_up reason='monomorphic_array' from='%d' to='(exact)'",
                  log->identify(tak->klass()));
      }
      array_klass = con;      // Use cast value moving forward
    }
  }

  // Come here for polymorphic array klasses

  // Extract the array element class
  int element_klass_offset = in_bytes(ArrayKlass::element_klass_offset());

  Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset);
  // We are allowed to use the constant type only if cast succeeded. If always_see_exact_class is true,
  // we must set a control edge from the IfTrue node created by the uncommon_trap above to the
  // LoadKlassNode.
  Node* a_e_klass = _gvn.transform(LoadKlassNode::make(_gvn, always_see_exact_class ? control() : NULL,
                                                       immutable_memory(), p2, tak));

  // Handle value type arrays
  if (is_value_array) {
    // We statically know that this is a value type array, use precise klass ptr
    ciValueKlass* vk = elemtype->isa_valuetype() ? elemtype->is_valuetype()->value_klass() :
                                                   elemptr->value_klass();
    a_e_klass = makecon(TypeKlassPtr::make(vk));
  }

  // Check (the hard way) and throw if not a subklass.
  return gen_checkcast(obj, a_e_klass);
}

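//--------------------------emit_guard_for_new----------------------------------
// Emit an uncommon trap if 'klass' is not initialized and not being
// initialized, is abstract, is an interface, is java.lang.Class, or is
// unresolved.  If 'klass' is being initialized, emit a runtime guard that
// deoptimizes unless the current thread is the initializing thread.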
void Parse::emit_guard_for_new(ciInstanceKlass* klass) {
  if ((!klass->is_initialized() && !klass->is_being_initialized()) ||
      klass->is_abstract() || klass->is_interface() ||
      klass->name() == ciSymbol::java_lang_Class() ||
      iter().is_unresolved_klass()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  klass);
  }
  if (klass->is_being_initialized()) {
    // Emit guarded new
    //   if (klass->_init_thread != current_thread ||
    //       klass->_init_state != being_initialized)
    //      uncommon_trap
    Node* cur_thread = _gvn.transform( new ThreadLocalNode() );
    Node* merge = new RegionNode(3);
    _gvn.set_type(merge, Type::CONTROL);
    Node* kls = makecon(TypeKlassPtr::make(klass));

    Node* init_thread_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_thread_offset()));
    Node* adr_node = basic_plus_adr(kls, kls, init_thread_offset);
    Node* init_thread = make_load(NULL, adr_node, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);
    Node *tst = Bool( CmpP( init_thread, cur_thread), BoolTest::eq);
    IfNode* iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
    set_control(IfTrue(iff));
    merge->set_req(1, IfFalse(iff));

    Node* init_state_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_state_offset()));
    adr_node = basic_plus_adr(kls, kls, init_state_offset);
    // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
    // can generate code to load it as unsigned byte.
    Node* init_state = make_load(NULL, adr_node, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
    Node* being_init = _gvn.intcon(InstanceKlass::being_initialized);
    tst = Bool( CmpI( init_state, being_init), BoolTest::eq);
    iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
    set_control(IfTrue(iff));
    merge->set_req(2, IfFalse(iff));

    PreserveJVMState pjvms(this);
    record_for_igvn(merge);
    set_control(merge);

    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  klass);
  }
}


//------------------------------do_new-----------------------------------------
void Parse::do_new() {
  kill_dead_locals();

  bool will_link;
  ciInstanceKlass* klass = iter().get_klass(will_link)->as_instance_klass();
  assert(will_link, "_new: typeflow responsibility");

  // Should initialize, or throw an InstantiationError?
  emit_guard_for_new(klass);
  if (stopped())  return;

  Node* kls = makecon(TypeKlassPtr::make(klass));
  Node* obj = new_instance(kls);

  // Push resultant oop onto stack
  push(obj);

  // Keep track of whether opportunities exist for StringBuilder
  // optimizations.
  if (OptimizeStringConcat &&
      (klass == C->env()->StringBuilder_klass() ||
       klass == C->env()->StringBuffer_klass())) {
    C->set_has_stringbuilder(true);
  }

  // Keep track of boxed values for EliminateAutoBox optimizations.
  if (C->eliminate_boxing() && klass->is_box_klass()) {
    C->set_has_boxed_value(true);
  }
}

//------------------------------do_defaultvalue---------------------------------
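// The defaultvalue bytecode pushes the default instance of a value class,
// with every field holding its default value.  The result is never null.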
void Parse::do_defaultvalue() {
  bool will_link;
  ciValueKlass* vk = iter().get_klass(will_link)->as_value_klass();
  assert(will_link, "defaultvalue: typeflow responsibility");

  // Should initialize, or throw an InstantiationError?
  emit_guard_for_new(vk);
  if (stopped())  return;

  // Always scalarize default value because it's not NULL by definition
  push(ValueTypeNode::make_default(_gvn, vk));
}

//------------------------------do_withfield------------------------------------
void Parse::do_withfield() {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "withfield: typeflow responsibility");
  BasicType bt = field->layout_type();
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();
  ciValueKlass* holder_klass = field->holder()->as_value_klass();
  Node* holder = pop();

  if (!holder->is_ValueType()) {
    // Null check and scalarize value type holder
    inc_sp(2);
    holder = null_check(holder);
    dec_sp(2);
    if (stopped())  return;
    holder = ValueTypeNode::make_from_oop(this, holder, holder_klass);
  }
  if (!val->is_ValueType() && field->is_flattenable()) {
    // Null check and scalarize value type field value
    inc_sp(2);
    val = null_check(val);
    dec_sp(2);
    if (stopped())  return;
    val = ValueTypeNode::make_from_oop(this, val, gvn().type(val)->value_klass());
  } else if (val->is_ValueType() && !field->is_flattenable()) {
    // Non-flattenable field should not be scalarized
    val = ValueTypePtrNode::make_from_value_type(this, val->as_ValueType());
  }

  // Clone the value type node and set the new field value
  ValueTypeNode* new_vt = holder->clone()->as_ValueType();
  new_vt->set_oop(_gvn.zerocon(T_VALUETYPE));
  gvn().set_type(new_vt, new_vt->bottom_type());
  new_vt->set_field_value_by_offset(field->offset(), val);

  if (holder_klass->is_scalarizable()) {
    push(_gvn.transform(new_vt));
  } else {
    push(new_vt->allocate(this)->get_oop());
  }
}

#ifndef PRODUCT
//------------------------------dump_map_adr_mem-------------------------------
// Debug dump of the mapping from address types to MergeMemNode indices.
void Parse::dump_map_adr_mem() const {
  tty->print_cr("--- Mapping from address types to memory Nodes ---");
  MergeMemNode *mem = map() == NULL ? NULL : (map()->memory()->is_MergeMem() ?
                                              map()->memory()->as_MergeMem() : NULL);
  for (uint i = 0; i < (uint)C->num_alias_types(); i++) {
    C->alias_type(i)->print_on(tty);
    tty->print("\t");
    // Node mapping, if any
    if (mem && i < mem->req() && mem->in(i) && mem->in(i) != mem->empty_memory()) {
      mem->in(i)->dump();
    } else {
      tty->cr();
    }
  }
}

#endif


//=============================================================================
//
// parser methods for profiling


//----------------------test_counter_against_threshold ------------------------
void Parse::test_counter_against_threshold(Node* cnt, int limit) {
  // Test the counter against the limit and uncommon trap if greater.
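  // Roughly:  if ((juint)cnt >= (juint)limit)  uncommon_trap(Reason_age);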

  // This code is largely copied from the range check code in
  // array_addressing()

  // Test invocation count vs threshold
  Node *threshold = makecon(TypeInt::make(limit));
  Node *chk   = _gvn.transform( new CmpUNode( cnt, threshold) );
  BoolTest::mask btest = BoolTest::lt;
  Node *tst   = _gvn.transform( new BoolNode( chk, btest) );
  // Branch to failure if threshold exceeded
  { BuildCutout unless(this, tst, PROB_ALWAYS);
    uncommon_trap(Deoptimization::Reason_age,
                  Deoptimization::Action_maybe_recompile);
  }
}

//----------------------increment_and_test_invocation_counter-------------------
void Parse::increment_and_test_invocation_counter(int limit) {
  if (!count_invocations()) return;

  // Get the Method* node.
  ciMethod* m = method();
  MethodCounters* counters_adr = m->ensure_method_counters();
  if (counters_adr == NULL) {
    C->record_failure("method counters allocation failed");
    return;
  }

  Node* ctrl = control();
  const TypePtr* adr_type = TypeRawPtr::make((address) counters_adr);
  Node *counters_node = makecon(adr_type);
  Node* adr_iic_node = basic_plus_adr(counters_node, counters_node,
    MethodCounters::interpreter_invocation_counter_offset_in_bytes());
  Node* cnt = make_load(ctrl, adr_iic_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);

  test_counter_against_threshold(cnt, limit);

  // Add one to the counter and store
  Node* incr = _gvn.transform(new AddINode(cnt, _gvn.intcon(1)));
  store_to_memory(ctrl, adr_iic_node, incr, T_INT, adr_type, MemNode::unordered);
}

//----------------------------method_data_addressing---------------------------
Node* Parse::method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  // Get offset within MethodData* of the data array
  ByteSize data_offset = MethodData::data_offset();

  // Get cell offset of the ProfileData within data array
  int cell_offset = md->dp_to_di(data->dp());

  // Add in counter_offset, the # of bytes into the ProfileData of counter or flag
  int offset = in_bytes(data_offset) + cell_offset + in_bytes(counter_offset);

  const TypePtr* adr_type = TypeMetadataPtr::make(md);
  Node* mdo = makecon(adr_type);
  Node* ptr = basic_plus_adr(mdo, mdo, offset);

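  // For table-like profile data, add idx * stride so the returned address is
  // mdo + offset + idx * stride (stride == 0 means no indexing).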
  if (stride != 0) {
    Node* str = _gvn.MakeConX(stride);
    Node* scale = _gvn.transform( new MulXNode( idx, str ) );
    ptr   = _gvn.transform( new AddPNode( mdo, ptr, scale ) );
  }

  return ptr;
}

//--------------------------increment_md_counter_at----------------------------
void Parse::increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  Node* adr_node = method_data_addressing(md, data, counter_offset, idx, stride);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt  = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
  Node* incr = _gvn.transform(new AddINode(cnt, _gvn.intcon(DataLayout::counter_increment)));
  store_to_memory(NULL, adr_node, incr, T_INT, adr_type, MemNode::unordered);
}

//--------------------------test_for_osr_md_counter_at-------------------------
void Parse::test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, int limit) {
  Node* adr_node = method_data_addressing(md, data, counter_offset);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);

  test_counter_against_threshold(cnt, limit);
}

//-------------------------------set_md_flag_at--------------------------------
void Parse::set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant) {
  Node* adr_node = method_data_addressing(md, data, DataLayout::flags_offset());

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* flags = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
  Node* incr = _gvn.transform(new OrINode(flags, _gvn.intcon(flag_constant)));
  store_to_memory(NULL, adr_node, incr, T_INT, adr_type, MemNode::unordered);
}

//----------------------------profile_taken_branch-----------------------------
void Parse::profile_taken_branch(int target_bci, bool force_update) {
  // This is a potential osr_site if we have a backedge.
  int cur_bci = bci();
  bool osr_site =
    (target_bci <= cur_bci) && count_invocations() && UseOnStackReplacement;

  // If we are going to OSR, restart at the target bytecode.
  set_bci(target_bci);

  // To do: factor out the limit calculations below. These duplicate
  // the similar limit calculations in the interpreter.

  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(cur_bci);
    assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch");
    increment_md_counter_at(md, data, JumpData::taken_offset());
  }

  // In the new tiered system this is all we need to do. In the old
  // (c2 based) tiered system we must do the code below.
#ifndef TIERED
  if (method_data_update()) {
    ciMethodData* md = method()->method_data();
    if (osr_site) {
      ciProfileData* data = md->bci_to_data(cur_bci);
      assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch");
      int limit = (CompileThreshold
                   * (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100;
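      // With typical C2 defaults (CompileThreshold=10000, OnStackReplacePercentage=140,
      // InterpreterProfilePercentage=33) this gives limit = 10700.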
      test_for_osr_md_counter_at(md, data, JumpData::taken_offset(), limit);
    }
  } else {
    // With method data update off, use the invocation counter to trigger an
    // OSR compilation, as done in the interpreter.
    if (osr_site) {
      int limit = (CompileThreshold * OnStackReplacePercentage) / 100;
      increment_and_test_invocation_counter(limit);
    }
  }
#endif // TIERED

  // Restore the original bytecode.
  set_bci(cur_bci);
}

//--------------------------profile_not_taken_branch---------------------------
void Parse::profile_not_taken_branch(bool force_update) {

  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(bci());
    assert(data != NULL && data->is_BranchData(), "need BranchData for not taken branch");
    increment_md_counter_at(md, data, BranchData::not_taken_offset());
  }

}

//---------------------------------profile_call--------------------------------
void Parse::profile_call(Node* receiver) {
  if (!method_data_update()) return;

  switch (bc()) {
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    profile_receiver_type(receiver);
    break;
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokespecial:
    profile_generic_call();
    break;
  default: fatal("unexpected call bytecode");
  }
}

//------------------------------profile_generic_call---------------------------
void Parse::profile_generic_call() {
  assert(method_data_update(), "must be generating profile code");

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data != NULL && data->is_CounterData(), "need CounterData for generic call");
  increment_md_counter_at(md, data, CounterData::count_offset());
}

//-----------------------------profile_receiver_type---------------------------
void Parse::profile_receiver_type(Node* receiver) {
  assert(method_data_update(), "must be generating profile code");

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data != NULL && data->is_ReceiverTypeData(), "need ReceiverTypeData here");

  // Skip if we aren't tracking receivers
  if (TypeProfileWidth < 1) {
    increment_md_counter_at(md, data, CounterData::count_offset());
    return;
  }
  ciReceiverTypeData* rdata = (ciReceiverTypeData*)data->as_ReceiverTypeData();

  Node* method_data = method_data_addressing(md, rdata, in_ByteSize(0));

  // Using an adr_type of TypePtr::BOTTOM to work around anti-dep problems.
  // A better solution might be to use TypeRawPtr::BOTTOM with RC_NARROW_MEM.
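  // The runtime call below records receiver's klass in the ReceiverTypeData
  // row table, or bumps the generic count when the table is already full.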
  make_runtime_call(RC_LEAF, OptoRuntime::profile_receiver_type_Type(),
                    CAST_FROM_FN_PTR(address,
                                     OptoRuntime::profile_receiver_type_C),
                    "profile_receiver_type_C",
                    TypePtr::BOTTOM,
                    method_data, receiver);
}

//---------------------------------profile_ret---------------------------------
void Parse::profile_ret(int target_bci) {
  if (!method_data_update()) return;

  // Skip if we aren't tracking ret targets
  if (TypeProfileWidth < 1) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data != NULL && data->is_RetData(), "need RetData for ret");
  ciRetData* ret_data = (ciRetData*)data->as_RetData();

  // Look for the target_bci in the table
  uint row;
  bool table_full = true;
  for (row = 0; row < ret_data->row_limit(); row++) {
    int key = ret_data->bci(row);
    table_full &= (key != RetData::no_bci);
    if (key == target_bci) break;
  }

  if (row >= ret_data->row_limit()) {
    // The target_bci was not found in the table.
    if (!table_full) {
      // XXX: Make slow call to update RetData
    }
    return;
  }

  // the target_bci is already in the table
  increment_md_counter_at(md, data, RetData::bci_count_offset(row));
}

//--------------------------profile_null_checkcast----------------------------
void Parse::profile_null_checkcast() {
  // Set the null-seen flag, done in conjunction with the usual null check. We
  // never unset the flag, so this is a one-way switch.
  if (!method_data_update()) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data != NULL && data->is_BitData(), "need BitData for checkcast");
  set_md_flag_at(md, data, BitData::null_seen_byte_constant());
}

//-----------------------------profile_switch_case-----------------------------
void Parse::profile_switch_case(int table_index) {
  if (!method_data_update()) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");

  ciProfileData* data = md->bci_to_data(bci());
  assert(data != NULL && data->is_MultiBranchData(), "need MultiBranchData for switch case");
  if (table_index >= 0) {
    increment_md_counter_at(md, data, MultiBranchData::case_count_offset(table_index));
  } else {
    increment_md_counter_at(md, data, MultiBranchData::default_count_offset());
  }
}