/*
 * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_parse1.cpp.incl"

// Static array so we can figure out which bytecodes stop us from compiling
// the most. Some of the non-static variables are needed in bytecodeInfo.cpp
// and eventually should be encapsulated in a proper class (gri 8/18/98).

int nodes_created  = 0;
int methods_parsed = 0;
int methods_seen   = 0;
int blocks_parsed  = 0;
int blocks_seen    = 0;

int explicit_null_checks_inserted = 0;
int explicit_null_checks_elided   = 0;
int all_null_checks_found         = 0, implicit_null_checks = 0;
int implicit_null_throws          = 0;

int reclaim_idx  = 0;
int reclaim_in   = 0;
int reclaim_node = 0;

#ifndef PRODUCT
bool Parse::BytecodeParseHistogram::_initialized = false;
uint Parse::BytecodeParseHistogram::_bytecodes_parsed [Bytecodes::number_of_codes];
uint Parse::BytecodeParseHistogram::_nodes_constructed[Bytecodes::number_of_codes];
uint Parse::BytecodeParseHistogram::_nodes_transformed[Bytecodes::number_of_codes];
uint Parse::BytecodeParseHistogram::_new_values       [Bytecodes::number_of_codes];
#endif

//------------------------------print_statistics-------------------------------
#ifndef PRODUCT
void Parse::print_statistics() {
  tty->print_cr("--- Compiler Statistics ---");
  tty->print("Methods seen: %d  Methods parsed: %d", methods_seen, methods_parsed);
  tty->print("  Nodes created: %d", nodes_created);
  tty->cr();
  if (methods_seen != methods_parsed)
    tty->print_cr("Reasons for parse failures (NOT cumulative):");
  tty->print_cr("Blocks parsed: %d  Blocks seen: %d", blocks_parsed, blocks_seen);

  if( explicit_null_checks_inserted )
    tty->print_cr("%d original NULL checks - %d elided (%2d%%); optimizer leaves %d,", explicit_null_checks_inserted, explicit_null_checks_elided, (100*explicit_null_checks_elided)/explicit_null_checks_inserted, all_null_checks_found);
  if( all_null_checks_found )
    tty->print_cr("%d made implicit (%2d%%)", implicit_null_checks,
                  (100*implicit_null_checks)/all_null_checks_found);
  if( implicit_null_throws )
    tty->print_cr("%d implicit null exceptions at runtime",
                  implicit_null_throws);

  if( PrintParseStatistics && BytecodeParseHistogram::initialized() ) {
    BytecodeParseHistogram::print();
  }
}
#endif

//------------------------------ON STACK REPLACEMENT---------------------------
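// On-stack replacement (OSR) switches a method from interpreted to
// compiled execution while it is still running, typically at a hot
// loop backedge.  The interpreter packs its locals, expression stack,
// and monitors into a temporary buffer; the routines below rebuild
// the parser's initial map from that buffer.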
// Construct a node which can be used to get incoming state for
// on stack replacement.
Node *Parse::fetch_interpreter_state(int index,
                                     BasicType bt,
                                     Node *local_addrs,
                                     Node *local_addrs_base) {
  Node *mem = memory(Compile::AliasIdxRaw);
  Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );

  // Very similar to LoadNode::make, except we handle un-aligned longs and
  // doubles on Sparc.  Intel can handle them just fine directly.
  Node *l;
  switch( bt ) {                // Signature is flattened
  case T_INT:     l = new (C, 3) LoadINode( 0, mem, adr, TypeRawPtr::BOTTOM ); break;
  case T_FLOAT:   l = new (C, 3) LoadFNode( 0, mem, adr, TypeRawPtr::BOTTOM ); break;
  case T_ADDRESS: l = new (C, 3) LoadPNode( 0, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM  ); break;
  case T_OBJECT:  l = new (C, 3) LoadPNode( 0, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM ); break;
  case T_LONG:
  case T_DOUBLE: {
    // Since arguments are in reverse order, the argument address 'adr'
    // refers to the back half of the long/double.  Recompute adr.
    adr = basic_plus_adr( local_addrs_base, local_addrs, -(index+1)*wordSize );
    if( Matcher::misaligned_doubles_ok ) {
      l = (bt == T_DOUBLE)
        ? (Node*)new (C, 3) LoadDNode( 0, mem, adr, TypeRawPtr::BOTTOM )
        : (Node*)new (C, 3) LoadLNode( 0, mem, adr, TypeRawPtr::BOTTOM );
    } else {
      l = (bt == T_DOUBLE)
        ? (Node*)new (C, 3) LoadD_unalignedNode( 0, mem, adr, TypeRawPtr::BOTTOM )
        : (Node*)new (C, 3) LoadL_unalignedNode( 0, mem, adr, TypeRawPtr::BOTTOM );
    }
    break;
  }
  default: ShouldNotReachHere();
  }
  return _gvn.transform(l);
}

// Helper routine to prevent the interpreter from handing
// unexpected typestate to an OSR method.
// The Node l is a value newly dug out of the interpreter frame.
// The type is the type predicted by ciTypeFlow.  Note that it is
// not a general type, but can only come from Type::get_typeflow_type.
// The safepoint is a map which will feed an uncommon trap.
Node* Parse::check_interpreter_type(Node* l, const Type* type,
                                    SafePointNode* &bad_type_exit) {

  const TypeOopPtr* tp = type->isa_oopptr();

  // TypeFlow may assert null-ness if a type appears unloaded.
  if (type == TypePtr::NULL_PTR ||
      (tp != NULL && !tp->klass()->is_loaded())) {
    // Value must be null, not a real oop.
    Node* chk = _gvn.transform( new (C, 3) CmpPNode(l, null()) );
    Node* tst = _gvn.transform( new (C, 2) BoolNode(chk, BoolTest::eq) );
    IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
    set_control(_gvn.transform( new (C, 1) IfTrueNode(iff) ));
    Node* bad_type = _gvn.transform( new (C, 1) IfFalseNode(iff) );
    bad_type_exit->control()->add_req(bad_type);
    l = null();
  }

  // Typeflow can also cut off paths from the CFG, based on
  // types which appear unloaded, or call sites which appear unlinked.
  // When paths are cut off, values at later merge points can rise
  // toward more specific classes.  Make sure these specific classes
  // are still in effect.
  if (tp != NULL && tp->klass() != C->env()->Object_klass()) {
    // TypeFlow asserted a specific object type.  Value must have that type.
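    // gen_checkcast emits the subtype check; any failing control flow
    // comes back through bad_type_ctrl and is folded into the
    // bad_type_exit region, which the caller turns into an uncommon trap.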
    Node* bad_type_ctrl = NULL;
    l = gen_checkcast(l, makecon(TypeKlassPtr::make(tp->klass())), &bad_type_ctrl);
    bad_type_exit->control()->add_req(bad_type_ctrl);
  }

  BasicType bt_l = _gvn.type(l)->basic_type();
  BasicType bt_t = type->basic_type();
  assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
  return l;
}

// Helper routine which sets up elements of the initial parser map when
// performing a parse for on stack replacement.  Add values into map.
// The only parameter contains the address of the interpreter arguments.
void Parse::load_interpreter_state(Node* osr_buf) {
  int index;
  int max_locals = jvms()->loc_size();
  int max_stack  = jvms()->stk_size();

  // Mismatch between method and jvms can occur since map briefly held
  // an OSR entry state (which takes up one RawPtr word).
  assert(max_locals == method()->max_locals(), "sanity");
  assert(max_stack >= method()->max_stack(),   "sanity");
  assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
  assert((int)jvms()->endoff() == (int)map()->req(), "sanity");

  // Find the start block.
  Block* osr_block = start_block();
  assert(osr_block->start() == osr_bci(), "sanity");

  // Set initial BCI.
  set_parse_bci(osr_block->start());

  // Set initial stack depth.
  set_sp(osr_block->start_sp());

  // Check bailouts.  We currently do not perform on stack replacement
  // of loops in catch blocks or loops which branch with a non-empty stack.
  if (sp() != 0) {
    C->record_method_not_compilable("OSR starts with non-empty stack");
    return;
  }
  // Do not OSR inside finally clauses:
  if (osr_block->has_trap_at(osr_block->start())) {
    C->record_method_not_compilable("OSR starts with an immediate trap");
    return;
  }

  // Commute monitors from interpreter frame to compiler frame.
  assert(jvms()->monitor_depth() == 0, "should be no active locks at beginning of osr");
  int mcnt = osr_block->flow()->monitor_count();
  Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
  for (index = 0; index < mcnt; index++) {
    // Make a BoxLockNode for the monitor.
    Node *box = _gvn.transform(new (C, 1) BoxLockNode(next_monitor()));

    // Displaced headers and locked objects are interleaved in the
    // temp OSR buffer.  We only copy the locked objects out here.
    // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
    Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr, osr_buf);
    // Try and copy the displaced header to the BoxNode
    Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf);

    store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw);

    // Build a bogus FastLockNode (no code will be generated) and push the
    // monitor into our debug info.
    const FastLockNode *flock = _gvn.transform(new (C, 3) FastLockNode( 0, lock_object, box ))->as_FastLock();
    map()->push_monitor( flock );

    // If the lock is our method synchronization lock, tuck it away in
    // _sync_lock for return and rethrow exit paths.
    if (index == 0 && method()->is_synchronized()) {
      _synch_lock = flock;
    }
  }

  // Use the raw liveness computation to make sure that unexpected
  // values don't propagate into the OSR frame.
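  // Two liveness views are consulted: the ordinary liveness decides
  // which locals are worth fetching at all, while the raw liveness
  // computed below is used later to skip type checks on dead
  // T_ADDRESS (jsr return-address) locals.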
  MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
  if (!live_locals.is_valid()) {
    // Degenerate or breakpointed method.
    C->record_method_not_compilable("OSR in empty or breakpointed method");
    return;
  }
  MethodLivenessResult raw_live_locals = method()->raw_liveness_at_bci(osr_bci());

  // Extract the needed locals from the interpreter frame.
  Node *locals_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals-1)*wordSize);

  // find all the locals that the interpreter thinks contain live oops
  const BitMap live_oops = method()->live_local_oops_at_bci(osr_bci());
  for (index = 0; index < max_locals; index++) {

    if (!live_locals.at(index)) {
      continue;
    }

    const Type *type = osr_block->local_type_at(index);

    if (type->isa_oopptr() != NULL) {

      // 6403625: Verify that the interpreter oopMap thinks that the oop is live
      // else we might load a stale oop if the MethodLiveness disagrees with the
      // result of the interpreter. If the interpreter says it is dead we agree
      // by making the value go to top.
      //

      if (!live_oops.at(index)) {
        if (C->log() != NULL) {
          C->log()->elem("OSR_mismatch local_index='%d'", index);
        }
        set_local(index, null());
        // and ignore it for the loads
        continue;
      }
    }

    // Filter out TOP, HALF, and BOTTOM.  (Cf. ensure_phi.)
    if (type == Type::TOP || type == Type::HALF) {
      continue;
    }
    // If the type falls to bottom, then this must be a local that
    // is mixing ints and oops or some such.  Forcing it to top
    // makes it go dead.
    if (type == Type::BOTTOM) {
      continue;
    }
    // Construct code to access the appropriate local.
    Node *value = fetch_interpreter_state(index, type->basic_type(), locals_addr, osr_buf);
    set_local(index, value);
  }

  // Extract the needed stack entries from the interpreter frame.
  for (index = 0; index < sp(); index++) {
    const Type *type = osr_block->stack_type_at(index);
    if (type != Type::TOP) {
      // Currently the compiler bails out when attempting to on stack replace
      // at a bci with a non-empty stack.  We should not reach here.
      ShouldNotReachHere();
    }
  }
  // End the OSR migration
  make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
                    CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
                    "OSR_migration_end", TypeRawPtr::BOTTOM,
                    osr_buf);

  // Now that the interpreter state is loaded, make sure it will match
  // at execution time what the compiler is expecting now:
  SafePointNode* bad_type_exit = clone_map();
  bad_type_exit->set_control(new (C, 1) RegionNode(1));

  for (index = 0; index < max_locals; index++) {
    if (stopped())  break;
    Node* l = local(index);
    if (l->is_top())  continue;  // nothing here
    const Type *type = osr_block->local_type_at(index);
    if (type->isa_oopptr() != NULL) {
      if (!live_oops.at(index)) {
        // skip type check for dead oops
        continue;
      }
    }
    if (type->basic_type() == T_ADDRESS && !raw_live_locals.at(index)) {
      // Skip type check for dead address locals
      continue;
    }
    set_local(index, check_interpreter_type(l, type, bad_type_exit));
  }

  for (index = 0; index < sp(); index++) {
    if (stopped())  break;
    Node* l = stack(index);
    if (l->is_top())  continue;  // nothing here
    const Type *type = osr_block->stack_type_at(index);
    set_stack(index, check_interpreter_type(l, type, bad_type_exit));
  }

  if (bad_type_exit->control()->req() > 1) {
    // Build an uncommon trap here, if any inputs can be unexpected.
    bad_type_exit->set_control(_gvn.transform( bad_type_exit->control() ));
    record_for_igvn(bad_type_exit->control());
    SafePointNode* types_are_good = map();
    set_map(bad_type_exit);
    // The unexpected type happens because a new edge is active
    // in the CFG, which typeflow had previously ignored.
    // E.g., Object x = coldAtFirst() && notReached()? "str": new Integer(123).
    // This x will be typed as Integer if notReached is not yet linked.
    uncommon_trap(Deoptimization::Reason_unreached,
                  Deoptimization::Action_reinterpret);
    set_map(types_are_good);
  }
}

//------------------------------Parse------------------------------------------
// Main parser constructor.
Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
  : _exits(caller)
{
  // Init some variables
  _caller = caller;
  _method = parse_method;
  _expected_uses = expected_uses;
  _depth = 1 + (caller->has_method() ? caller->depth() : 0);
  _wrote_final = false;
  _entry_bci = InvocationEntryBci;
  _tf = NULL;
  _block = NULL;
  debug_only(_block_count = -1);
  debug_only(_blocks = (Block*)-1);
#ifndef PRODUCT
  if (PrintCompilation || PrintOpto) {
    // Make sure I have an inline tree, so I can print messages about it.
    JVMState* ilt_caller = is_osr_parse() ? caller->caller() : caller;
    InlineTree::find_subtree_from_root(C->ilt(), ilt_caller, parse_method, true);
  }
  _max_switch_depth = 0;
  _est_switch_depth = 0;
#endif
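  // Everything below leans on ciTypeFlow: its basic blocks become the
  // parser's blocks, and a failed flow analysis aborts the compile.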
  _tf = TypeFunc::make(method());
  _iter.reset_to_method(method());
  _flow = method()->get_flow_analysis();
  if (_flow->failing()) {
    C->record_method_not_compilable_all_tiers(_flow->failure_reason());
  }

#ifndef PRODUCT
  if (_flow->has_irreducible_entry()) {
    C->set_parsed_irreducible_loop(true);
  }
#endif

  if (_expected_uses <= 0) {
    _prof_factor = 1;
  } else {
    float prof_total = parse_method->interpreter_invocation_count();
    if (prof_total <= _expected_uses) {
      _prof_factor = 1;
    } else {
      _prof_factor = _expected_uses / prof_total;
    }
  }
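  // Example: if the interpreter profiled 1000 invocations but this
  // (possibly inlined) call site expects only 100 uses, profile counts
  // are scaled by _prof_factor = 0.1 before being trusted here.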
  CompileLog* log = C->log();
  if (log != NULL) {
    log->begin_head("parse method='%d' uses='%g'",
                    log->identify(parse_method), expected_uses);
    if (depth() == 1 && C->is_osr_compilation()) {
      log->print(" osr_bci='%d'", C->entry_bci());
    }
    log->stamp();
    log->end_head();
  }

  // Accumulate deoptimization counts.
  // (The range_check and store_check counts are checked elsewhere.)
  ciMethodData* md = method()->method_data();
  for (uint reason = 0; reason < md->trap_reason_limit(); reason++) {
    uint md_count = md->trap_count(reason);
    if (md_count != 0) {
      if (md_count == md->trap_count_limit())
        md_count += md->overflow_trap_count();
      uint total_count = C->trap_count(reason);
      uint old_count   = total_count;
      total_count += md_count;
      // Saturate the add if it overflows.
      if (total_count < old_count || total_count < md_count)
        total_count = (uint)-1;
      C->set_trap_count(reason, total_count);
      if (log != NULL)
        log->elem("observe trap='%s' count='%d' total='%d'",
                  Deoptimization::trap_reason_name(reason),
                  md_count, total_count);
    }
  }
  // Accumulate total sum of decompilations, also.
  C->set_decompile_count(C->decompile_count() + md->decompile_count());

  _count_invocations = C->do_count_invocations();
  _method_data_update = C->do_method_data_update();

  if (log != NULL && method()->has_exception_handlers()) {
    log->elem("observe that='has_exception_handlers'");
  }

  assert(method()->can_be_compiled(), "Can not parse this method, cutout earlier");
  assert(method()->has_balanced_monitors(), "Can not parse unbalanced monitors, cutout earlier");

  // Always register dependence if JVMTI is enabled, because
  // either breakpoint setting or hotswapping of methods may
  // cause deoptimization.
  if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
    C->dependencies()->assert_evol_method(method());
  }

  methods_seen++;

  // Do some special top-level things.
  if (depth() == 1 && C->is_osr_compilation()) {
    _entry_bci = C->entry_bci();
    _flow = method()->get_osr_flow_analysis(osr_bci());
    if (_flow->failing()) {
      C->record_method_not_compilable(_flow->failure_reason());
#ifndef PRODUCT
      if (PrintOpto && (Verbose || WizardMode)) {
        tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
        if (Verbose) {
          method()->print_oop();
          method()->print_codes();
          _flow->print();
        }
      }
#endif
    }
    _tf = C->tf();     // the OSR entry type is different
  }

#ifdef ASSERT
  if (depth() == 1) {
    assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");
    if (C->tf() != tf()) {
      MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
      assert(C->env()->system_dictionary_modification_counter_changed(),
             "Must invalidate if TypeFuncs differ");
    }
  } else {
    assert(!this->is_osr_parse(), "no recursive OSR");
  }
#endif

  methods_parsed++;
#ifndef PRODUCT
  // add method size here to guarantee that inlined methods are added too
  if (TimeCompiler)
    _total_bytes_compiled += method()->code_size();

  show_parse_info();
#endif

  if (failing()) {
    if (log)  log->done("parse");
    return;
  }

  gvn().set_type(root(), root()->bottom_type());
  gvn().transform(top());

  // Import the results of the ciTypeFlow.
  init_blocks();

  // Merge point for all normal exits
  build_exits();

  // Setup the initial JVM state map.
  SafePointNode* entry_map = create_entry_map();

  // Check for bailouts during map initialization
  if (failing() || entry_map == NULL) {
    if (log)  log->done("parse");
    return;
  }

  Node_Notes* caller_nn = C->default_node_notes();
  // Collect debug info for inlined calls unless -XX:-DebugInlinedCalls.
  if (DebugInlinedCalls || depth() == 1) {
    C->set_default_node_notes(make_node_notes(caller_nn));
  }
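  // Two ways to enter the method body: an OSR parse rebuilds the JVM
  // state from the interpreter's OSR buffer, while a normal parse emits
  // the usual entry code (receiver lock, dtrace entry probe).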
  if (is_osr_parse()) {
    Node* osr_buf = entry_map->in(TypeFunc::Parms+0);
    entry_map->set_req(TypeFunc::Parms+0, top());
    set_map(entry_map);
    load_interpreter_state(osr_buf);
  } else {
    set_map(entry_map);
    do_method_entry();
  }

  // Check for bailouts during method entry.
  if (failing()) {
    if (log)  log->done("parse");
    C->set_default_node_notes(caller_nn);
    return;
  }

  entry_map = map();  // capture any changes performed by method setup code
  assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");

  // We begin parsing as if we have just encountered a jump to the
  // method entry.
  Block* entry_block = start_block();
  assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
  set_map_clone(entry_map);
  merge_common(entry_block, entry_block->next_path_num());

#ifndef PRODUCT
  BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
  set_parse_histogram( parse_histogram_obj );
#endif

  // Parse all the basic blocks.
  do_all_blocks();

  C->set_default_node_notes(caller_nn);

  // Check for bailouts during conversion to graph
  if (failing()) {
    if (log)  log->done("parse");
    return;
  }

  // Fix up all exiting control flow.
  set_map(entry_map);
  do_exits();

  if (log)  log->done("parse nodes='%d' memory='%d'",
                      C->unique(), C->node_arena()->used());
}

//---------------------------do_all_blocks-------------------------------------
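// Blocks are visited in reverse post-order.  With irreducible loops a
// block can become ready only after some later block has been parsed,
// so the walk below retries until a full pass makes no more progress.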
void Parse::do_all_blocks() {
  bool has_irreducible = flow()->has_irreducible_entry();

  // Walk over all blocks in Reverse Post-Order.
  while (true) {
    bool progress = false;
    for (int rpo = 0; rpo < block_count(); rpo++) {
      Block* block = rpo_at(rpo);

      if (block->is_parsed()) continue;

      if (!block->is_merged()) {
        // Dead block, no state reaches this block
        continue;
      }

      // Prepare to parse this block.
      load_state_from(block);

      if (stopped()) {
        // Block is dead.
        continue;
      }

      blocks_parsed++;

      progress = true;
      if (block->is_loop_head() || block->is_handler() || (has_irreducible && !block->is_ready())) {
        // Not all preds have been parsed.  We must build phis everywhere.
        // (Note that dead locals do not get phis built, ever.)
        ensure_phis_everywhere();

        // Leave behind an undisturbed copy of the map, for future merges.
        set_map(clone_map());
      }

      if (control()->is_Region() && !block->is_loop_head() && !has_irreducible && !block->is_handler()) {
        // In the absence of irreducible loops, the Region and Phis
        // associated with a merge that doesn't involve a backedge can
        // be simplified now since the RPO parsing order guarantees
        // that any path which was supposed to reach here has already
        // been parsed or must be dead.
        Node* c = control();
        Node* result = _gvn.transform_no_reclaim(control());
        if (c != result && TraceOptoParse) {
          tty->print_cr("Block #%d replace %d with %d", block->rpo(), c->_idx, result->_idx);
        }
        if (result != top()) {
          record_for_igvn(result);
        }
      }

      // Parse the block.
      do_one_block();

      // Check for bailouts.
      if (failing())  return;
    }

    // with irreducible loops multiple passes might be necessary to parse everything
    if (!has_irreducible || !progress) {
      break;
    }
  }

  blocks_seen += block_count();

#ifndef PRODUCT
  // Make sure there are no half-processed blocks remaining.
  // Every remaining unprocessed block is dead and may be ignored now.
  for (int rpo = 0; rpo < block_count(); rpo++) {
    Block* block = rpo_at(rpo);
    if (!block->is_parsed()) {
      if (TraceOptoParse) {
        tty->print_cr("Skipped dead block %d at bci:%d", rpo, block->start());
      }
      assert(!block->is_merged(), "no half-processed blocks");
    }
  }
#endif
}

//-------------------------------build_exits----------------------------------
// Build normal and exceptional exit merge points.
void Parse::build_exits() {
  // make a clone of caller to prevent sharing of side-effects
  _exits.set_map(_exits.clone_map());
  _exits.clean_stack(_exits.sp());
  _exits.sync_jvms();

  RegionNode* region = new (C, 1) RegionNode(1);
  record_for_igvn(region);
  gvn().set_type_bottom(region);
  _exits.set_control(region);

  // Note:  iophi and memphi are not transformed until do_exits.
  Node* iophi  = new (C, region->req()) PhiNode(region, Type::ABIO);
  Node* memphi = new (C, region->req()) PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
  _exits.set_i_o(iophi);
  _exits.set_all_memory(memphi);

  // Add a return value to the exit state.  (Do not push it yet.)
  if (tf()->range()->cnt() > TypeFunc::Parms) {
    const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
    // Don't "bind" an unloaded return klass to the ret_phi.  If the klass
    // becomes loaded during the subsequent parsing, the loaded and unloaded
    // types will not join when we transform and push in do_exits().
    const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
    if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
      ret_type = TypeOopPtr::BOTTOM;
    }
    int         ret_size = type2size[ret_type->basic_type()];
    Node*       ret_phi  = new (C, region->req()) PhiNode(region, ret_type);
    _exits.ensure_stack(ret_size);
    assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
    assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
    _exits.set_argument(0, ret_phi);  // here is where the parser finds it
    // Note:  ret_phi is not yet pushed, until do_exits.
  }
}


//----------------------------build_start_state-------------------------------
// Construct a state which contains only the incoming arguments from an
// unknown caller.  The method & bci will be NULL & InvocationEntryBci.
JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
  int        arg_size = tf->domain()->cnt();
  int        max_size = MAX2(arg_size, (int)tf->range()->cnt());
  JVMState*  jvms     = new (this) JVMState(max_size - TypeFunc::Parms);
  SafePointNode* map  = new (this, max_size) SafePointNode(max_size, NULL);
  record_for_igvn(map);
  assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
  Node_Notes* old_nn = default_node_notes();
  if (old_nn != NULL && has_method()) {
    Node_Notes* entry_nn = old_nn->clone(this);
    JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
    entry_jvms->set_offsets(0);
    entry_jvms->set_bci(entry_bci());
    entry_nn->set_jvms(entry_jvms);
    set_default_node_notes(entry_nn);
  }
  uint i;
  for (i = 0; i < (uint)arg_size; i++) {
    Node* parm = initial_gvn()->transform(new (this, 1) ParmNode(start, i));
    map->init_req(i, parm);
    // Record all these guys for later GVN.
    record_for_igvn(parm);
  }
  for (; i < map->req(); i++) {
    map->init_req(i, top());
  }
  assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
  set_default_node_notes(old_nn);
  map->set_jvms(jvms);
  jvms->set_map(map);
  return jvms;
}

//-----------------------------make_node_notes---------------------------------
Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
  if (caller_nn == NULL)  return NULL;
  Node_Notes* nn = caller_nn->clone(C);
  JVMState* caller_jvms = nn->jvms();
  JVMState* jvms = new (C) JVMState(method(), caller_jvms);
  jvms->set_offsets(0);
  jvms->set_bci(_entry_bci);
  nn->set_jvms(jvms);
  return nn;
}


//--------------------------return_values--------------------------------------
void Compile::return_values(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* ret = new (this, TypeFunc::Parms) ReturnNode(TypeFunc::Parms,
                                                     kit.control(),
                                                     kit.i_o(),
                                                     kit.reset_memory(),
                                                     kit.frameptr(),
                                                     kit.returnadr());
  // Add zero or 1 return values
  int ret_size = tf()->range()->cnt() - TypeFunc::Parms;
  if (ret_size > 0) {
    kit.inc_sp(-ret_size);  // pop the return value(s)
    kit.sync_jvms();
    ret->add_req(kit.argument(0));
    // Note:  The second dummy edge is not needed by a ReturnNode.
  }
  // bind it to root
  root()->add_req(ret);
  record_for_igvn(ret);
  initial_gvn()->transform_no_reclaim(ret);
}

//------------------------rethrow_exceptions-----------------------------------
// Bind all exception states in the list into a single RethrowNode.
void Compile::rethrow_exceptions(JVMState* jvms) {
  GraphKit kit(jvms);
  if (!kit.has_exceptions())  return;  // nothing to generate
  // Load my combined exception state into the kit, with all phis transformed:
  SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
  Node* ex_oop = kit.use_exception_state(ex_map);
  RethrowNode* exit = new (this, TypeFunc::Parms + 1) RethrowNode(kit.control(),
                                                                  kit.i_o(), kit.reset_memory(),
                                                                  kit.frameptr(), kit.returnadr(),
                                                                  // like a return but with exception input
                                                                  ex_oop);
  // bind to root
  root()->add_req(exit);
  record_for_igvn(exit);
  initial_gvn()->transform_no_reclaim(exit);
}

bool Parse::can_rerun_bytecode() {
  switch (bc()) {
  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
  case Bytecodes::_ldc2_w:
  case Bytecodes::_getfield:
  case Bytecodes::_putfield:
  case Bytecodes::_getstatic:
  case Bytecodes::_putstatic:
  case Bytecodes::_arraylength:
  case Bytecodes::_baload:
  case Bytecodes::_caload:
  case Bytecodes::_iaload:
  case Bytecodes::_saload:
  case Bytecodes::_faload:
  case Bytecodes::_aaload:
  case Bytecodes::_laload:
  case Bytecodes::_daload:
  case Bytecodes::_bastore:
  case Bytecodes::_castore:
  case Bytecodes::_iastore:
  case Bytecodes::_sastore:
  case Bytecodes::_fastore:
  case Bytecodes::_aastore:
  case Bytecodes::_lastore:
  case Bytecodes::_dastore:
  case Bytecodes::_irem:
  case Bytecodes::_idiv:
  case Bytecodes::_lrem:
  case Bytecodes::_ldiv:
  case Bytecodes::_frem:
  case Bytecodes::_fdiv:
  case Bytecodes::_drem:
  case Bytecodes::_ddiv:
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_athrow:
  case Bytecodes::_anewarray:
  case Bytecodes::_newarray:
  case Bytecodes::_multianewarray:
  case Bytecodes::_new:
  case Bytecodes::_monitorenter: // can re-run initial null check, only
  case Bytecodes::_return:
    return true;

  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    return false;

  default:
    assert(false, "unexpected bytecode produced an exception");
    return true;
  }
}

//---------------------------do_exceptions-------------------------------------
// Process exceptions arising from the current bytecode.
// Send caught exceptions to the proper handler within this method.
// Unhandled exceptions feed into _exit.
void Parse::do_exceptions() {
  if (!has_exceptions())  return;

  if (failing()) {
    // Pop them all off and throw them away.
    while (pop_exception_state() != NULL) ;
    return;
  }

  // Make sure we can classify this bytecode if we need to.
  debug_only(can_rerun_bytecode());

  PreserveJVMState pjvms(this, false);

  SafePointNode* ex_map;
  while ((ex_map = pop_exception_state()) != NULL) {
    if (!method()->has_exception_handlers()) {
      // Common case:  Transfer control outward.
      // Doing it this early allows the exceptions to common up
      // even between adjacent method calls.
      throw_to_exit(ex_map);
    } else {
      // Have to look at the exception first.
      assert(stopped(), "catch_inline_exceptions trashes the map");
      catch_inline_exceptions(ex_map);
      stop_and_kill_map();      // we used up this exception state; kill it
    }
  }

  // We now return to our regularly scheduled program:
}

//---------------------------throw_to_exit-------------------------------------
// Merge the given map into an exception exit from this method.
// The exception exit will handle any unlocking of receiver.
// The ex_oop must be saved within the ex_map, unlike merge_exception.
void Parse::throw_to_exit(SafePointNode* ex_map) {
  // Pop the JVMS to (a copy of) the caller.
  GraphKit caller;
  caller.set_map_clone(_caller->map());
  caller.set_bci(_caller->bci());
  caller.set_sp(_caller->sp());
  // Copy out the standard machine state:
  for (uint i = 0; i < TypeFunc::Parms; i++) {
    caller.map()->set_req(i, ex_map->in(i));
  }
  // ...and the exception:
  Node* ex_oop = saved_ex_oop(ex_map);
  SafePointNode* caller_ex_map = caller.make_exception_state(ex_oop);
  // Finally, collect the new exception state in my exits:
  _exits.add_exception_state(caller_ex_map);
}

//------------------------------do_exits---------------------------------------
void Parse::do_exits() {
  set_parse_bci(InvocationEntryBci);

  // Now peephole on the return bits
  Node* region = _exits.control();
  _exits.set_control(gvn().transform(region));

  Node* iophi = _exits.i_o();
  _exits.set_i_o(gvn().transform(iophi));

  if (wrote_final()) {
    // This method (which must be a constructor by the rules of Java)
    // wrote a final.  The effects of all initializations must be
    // committed to memory before any code after the constructor
    // publishes the reference to the newly constructed object.
    // Rather than wait for the publication, we simply block the
    // writes here.  Rather than put a barrier on only those writes
    // which are required to complete, we force all writes to complete.
    //
    // "All bets are off" unless the first publication occurs after a
    // normal return from the constructor.  We do not attempt to detect
    // such unusual early publications.  But no barrier is needed on
    // exceptional returns, since they cannot publish normally.
    //
    _exits.insert_mem_bar(Op_MemBarRelease);
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name();
      tty->print_cr(" writes finals and needs a memory barrier");
    }
#endif
  }

  for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
    // transform each slice of the original memphi:
    mms.set_memory(_gvn.transform(mms.memory()));
  }

  if (tf()->range()->cnt() > TypeFunc::Parms) {
    const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
    Node*       ret_phi  = _gvn.transform( _exits.argument(0) );
    assert(_exits.control()->is_top() || !_gvn.type(ret_phi)->empty(), "return value must be well defined");
    _exits.push_node(ret_type->basic_type(), ret_phi);
  }

  // Note:  Logic for creating and optimizing the ReturnNode is in Compile.

  // Unlock along the exceptional paths.
  // This is done late so that we can common up equivalent exceptions
  // (e.g., null checks) arising from multiple points within this method.
  // See GraphKit::add_exception_state, which performs the commoning.
  bool do_synch = method()->is_synchronized() && GenerateSynchronizationCode;

  // record exit from a method if compiled while Dtrace is turned on.
  if (do_synch || C->env()->dtrace_method_probes()) {
    // First move the exception list out of _exits:
    GraphKit kit(_exits.transfer_exceptions_into_jvms());
    SafePointNode* normal_map = kit.map();  // keep this guy safe
    // Now re-collect the exceptions into _exits:
    SafePointNode* ex_map;
    while ((ex_map = kit.pop_exception_state()) != NULL) {
      Node* ex_oop = kit.use_exception_state(ex_map);
      // Force the exiting JVM state to have this method at InvocationEntryBci.
      // The exiting JVM state is otherwise a copy of the calling JVMS.
      JVMState* caller = kit.jvms();
      JVMState* ex_jvms = caller->clone_shallow(C);
      ex_jvms->set_map(kit.clone_map());
      ex_jvms->map()->set_jvms(ex_jvms);
      ex_jvms->set_bci(   InvocationEntryBci);
      kit.set_jvms(ex_jvms);
      if (do_synch) {
        // Add on the synchronized-method box/object combo
        kit.map()->push_monitor(_synch_lock);
        // Unlock!
        kit.shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
      }
      if (C->env()->dtrace_method_probes()) {
        kit.make_dtrace_method_exit(method());
      }
      // Done with exception-path processing.
      ex_map = kit.make_exception_state(ex_oop);
      assert(ex_jvms->same_calls_as(ex_map->jvms()), "sanity");
      // Pop the last vestige of this method:
      ex_map->set_jvms(caller->clone_shallow(C));
      ex_map->jvms()->set_map(ex_map);
      _exits.push_exception_state(ex_map);
    }
    assert(_exits.map() == normal_map, "keep the same return state");
  }

  {
    // Capture very early exceptions (receiver null checks) from caller JVMS
    GraphKit caller(_caller);
    SafePointNode* ex_map;
    while ((ex_map = caller.pop_exception_state()) != NULL) {
      _exits.add_exception_state(ex_map);
    }
  }
}

//-----------------------------create_entry_map-------------------------------
// Initialize our parser map to contain the types at method entry.
// For OSR, the map contains a single RawPtr parameter.
// Initial monitor locking for sync. methods is performed by do_method_entry.
SafePointNode* Parse::create_entry_map() {
  // Check for really stupid bail-out cases.
  uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
  if (len >= 32760) {
    C->record_method_not_compilable_all_tiers("too many local variables");
    return NULL;
  }

  // If this is an inlined method, we may have to do a receiver null check.
  if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
    GraphKit kit(_caller);
    kit.null_check_receiver(method());
    _caller = kit.transfer_exceptions_into_jvms();
    if (kit.stopped()) {
      _exits.add_exception_states_from(_caller);
      _exits.set_jvms(_caller);
      return NULL;
    }
  }

  assert(method() != NULL, "parser must have a method");

  // Create an initial safepoint to hold JVM state during parsing
  JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : NULL);
  set_map(new (C, len) SafePointNode(len, jvms));
  jvms->set_map(map());
  record_for_igvn(map());
  assert(jvms->endoff() == len, "correct jvms sizing");

  SafePointNode* inmap = _caller->map();
  assert(inmap != NULL, "must have inmap");

  uint i;

  // Pass thru the predefined input parameters.
  for (i = 0; i < TypeFunc::Parms; i++) {
    map()->init_req(i, inmap->in(i));
  }

  if (depth() == 1) {
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");

  // Now add the locals which are initially bound to arguments:
  uint arg_size = tf()->domain()->cnt();
  ensure_stack(arg_size - TypeFunc::Parms);  // OSR methods have funny args
  for (i = TypeFunc::Parms; i < arg_size; i++) {
    map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
  }

  // Clear out the rest of the map (locals and stack)
  for (i = arg_size; i < len; i++) {
    map()->init_req(i, top());
  }

  SafePointNode* entry_map = stop();
  return entry_map;
}

//-----------------------------do_method_entry--------------------------------
// Emit any code needed in the pseudo-block before BCI zero.
// The main thing to do is lock the receiver of a synchronized method.
void Parse::do_method_entry() {
  set_parse_bci(InvocationEntryBci); // Pseudo-BCP
  set_sp(0);                         // Java Stack Pointer

  NOT_PRODUCT( count_compiled_calls(true/*at_method_entry*/, false/*is_inline*/); )

  if (C->env()->dtrace_method_probes()) {
    make_dtrace_method_entry(method());
  }

  // If the method is synchronized, we need to construct a lock node, attach
  // it to the Start node, and pin it there.
  if (method()->is_synchronized()) {
    // Insert a FastLockNode right after the Start which takes as arguments
    // the current thread pointer, the "this" pointer & the address of the
    // stack slot pair used for the lock.  The "this" pointer is a projection
    // off the start node, but the locking spot has to be constructed by
    // creating a ConLNode of 0, and boxing it with a BoxLockNode.  The BoxLockNode
    // becomes the second argument to the FastLockNode call.  The
    // FastLockNode becomes the new control parent to pin it to the start.

    // Setup Object Pointer
    Node *lock_obj = NULL;
    if(method()->is_static()) {
      ciInstance* mirror = _method->holder()->java_mirror();
      const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
      lock_obj = makecon(t_lock);
    } else {                  // Else pass the "this" pointer,
      lock_obj = local(0);    // which is Parm0 from StartNode
    }
    // Clear out dead values from the debug info.
    kill_dead_locals();
    // Build the FastLockNode
    _synch_lock = shared_lock(lock_obj);
  }

  if (depth() == 1) {
    increment_and_test_invocation_counter(Tier2CompileThreshold);
  }
}

//------------------------------init_blocks------------------------------------
// Initialize our parser map to contain the types/monitors at method entry.
void Parse::init_blocks() {
  // Create the blocks.
  _block_count = flow()->block_count();
  _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
  Copy::zero_to_bytes(_blocks, sizeof(Block)*_block_count);

  int rpo;

  // Initialize the structs.
  for (rpo = 0; rpo < block_count(); rpo++) {
    Block* block = rpo_at(rpo);
    block->init_node(this, rpo);
  }

  // Collect predecessor and successor information.
  for (rpo = 0; rpo < block_count(); rpo++) {
    Block* block = rpo_at(rpo);
    block->init_graph(this);
  }
}

//-------------------------------init_node-------------------------------------
void Parse::Block::init_node(Parse* outer, int rpo) {
  _flow = outer->flow()->rpo_at(rpo);
  _pred_count = 0;
  _preds_parsed = 0;
  _count = 0;
  assert(pred_count() == 0 && preds_parsed() == 0, "sanity");
  assert(!(is_merged() || is_parsed() || is_handler()), "sanity");
  assert(_live_locals.size() == 0, "sanity");

  // entry point has additional predecessor
  if (flow()->is_start())  _pred_count++;
  assert(flow()->is_start() == (this == outer->start_block()), "");
}

//-------------------------------init_graph------------------------------------
void Parse::Block::init_graph(Parse* outer) {
  // Create the successor list for this parser block.
  GrowableArray<ciTypeFlow::Block*>* tfs = flow()->successors();
  GrowableArray<ciTypeFlow::Block*>* tfe = flow()->exceptions();
  int ns = tfs->length();
  int ne = tfe->length();
  _num_successors = ns;
  _all_successors = ns+ne;
  _successors = (ns+ne == 0) ? NULL : NEW_RESOURCE_ARRAY(Block*, ns+ne);
  int p = 0;
  for (int i = 0; i < ns+ne; i++) {
    ciTypeFlow::Block* tf2 = (i < ns) ? tfs->at(i) : tfe->at(i-ns);
    Block* block2 = outer->rpo_at(tf2->rpo());
    _successors[i] = block2;

    // Accumulate pred info for the other block, too.
    if (i < ns) {
      block2->_pred_count++;
    } else {
      block2->_is_handler = true;
    }

#ifdef ASSERT
    // A block's successors must be distinguishable by BCI.
    // That is, no bytecode is allowed to branch to two different
    // clones of the same code location.
    for (int j = 0; j < i; j++) {
      Block* block1 = _successors[j];
      if (block1 == block2)  continue; // duplicates are OK
      assert(block1->start() != block2->start(), "successors have unique bcis");
    }
#endif
  }

  // Note: We never call next_path_num along exception paths, so they
  // never get processed as "ready".  Also, the input phis of exception
  // handlers get specially processed, so that the handler's entry state
  // does not depend on an exact count of incoming exception paths.
}

//---------------------------successor_for_bci---------------------------------
Parse::Block* Parse::Block::successor_for_bci(int bci) {
  for (int i = 0; i < all_successors(); i++) {
    Block* block2 = successor_at(i);
    if (block2->start() == bci)  return block2;
  }
  // We can actually reach here if ciTypeFlow traps out a block
  // due to an unloaded class, and concurrently with compilation the
  // class is then loaded, so that a later phase of the parser is
  // able to see more of the bytecode CFG.  Or, the flow pass and
  // the parser can have a minor difference of opinion about executability
  // of bytecodes.  For example, "obj.field = null" is executable even
  // if the field's type is an unloaded class; the flow pass used to
  // make a trap for such code.
  return NULL;
}


//-----------------------------stack_type_at-----------------------------------
const Type* Parse::Block::stack_type_at(int i) const {
  return get_type(flow()->stack_type_at(i));
}


//-----------------------------local_type_at-----------------------------------
const Type* Parse::Block::local_type_at(int i) const {
  // Make dead locals fall to bottom.
  if (_live_locals.size() == 0) {
    MethodLivenessResult live_locals = flow()->outer()->method()->liveness_at_bci(start());
    // This bitmap can be zero length if we saw a breakpoint.
    // In such cases, pretend they are all live.
    ((Block*)this)->_live_locals = live_locals;
  }
  if (_live_locals.size() > 0 && !_live_locals.at(i))
    return Type::BOTTOM;

  return get_type(flow()->local_type_at(i));
}


#ifndef PRODUCT

//----------------------------name_for_bc--------------------------------------
// helper method for BytecodeParseHistogram
static const char* name_for_bc(int i) {
  return Bytecodes::is_defined(i) ? Bytecodes::name(Bytecodes::cast(i)) : "xxxunusedxxx";
}

//----------------------------BytecodeParseHistogram------------------------------------
Parse::BytecodeParseHistogram::BytecodeParseHistogram(Parse *p, Compile *c) {
  _parser   = p;
  _compiler = c;
  if( ! _initialized ) { _initialized = true; reset(); }
}

//----------------------------current_count------------------------------------
int Parse::BytecodeParseHistogram::current_count(BPHType bph_type) {
  switch( bph_type ) {
  case BPH_transforms: { return _parser->gvn().made_progress(); }
  case BPH_values:     { return _parser->gvn().made_new_values(); }
  default: { ShouldNotReachHere(); return 0; }
  }
}

//----------------------------initialized--------------------------------------
bool Parse::BytecodeParseHistogram::initialized() { return _initialized; }

//----------------------------reset--------------------------------------------
void Parse::BytecodeParseHistogram::reset() {
  int i = Bytecodes::number_of_codes;
  while (i-- > 0) { _bytecodes_parsed[i] = 0; _nodes_constructed[i] = 0; _nodes_transformed[i] = 0; _new_values[i] = 0; }
}

//----------------------------set_initial_state--------------------------------
// Record info when starting to parse one bytecode
void Parse::BytecodeParseHistogram::set_initial_state( Bytecodes::Code bc ) {
  if( PrintParseStatistics && !_parser->is_osr_parse() ) {
    _initial_bytecode   = bc;
    _initial_node_count = _compiler->unique();
    _initial_transforms = current_count(BPH_transforms);
    _initial_values     = current_count(BPH_values);
  }
}

//----------------------------record_change------------------------------------
// Record results of parsing one bytecode
void Parse::BytecodeParseHistogram::record_change() {
  if( PrintParseStatistics && !_parser->is_osr_parse() ) {
    ++_bytecodes_parsed[_initial_bytecode];
    _nodes_constructed [_initial_bytecode] += (_compiler->unique() - _initial_node_count);
    _nodes_transformed [_initial_bytecode] += (current_count(BPH_transforms) - _initial_transforms);
    _new_values        [_initial_bytecode] += (current_count(BPH_values) - _initial_values);
  }
}


//----------------------------print--------------------------------------------
void Parse::BytecodeParseHistogram::print(float cutoff) {
  ResourceMark rm;
  // print profile
  int total = 0;
  int i     = 0;
  for( i = 0; i < Bytecodes::number_of_codes; ++i ) { total += _bytecodes_parsed[i]; }
  int abs_sum = 0;
  tty->cr();   //0123456789012345678901234567890123456789012345678901234567890123456789
  tty->print_cr("Histogram of %d parsed bytecodes:", total);
  if( total == 0 ) { return; }
  tty->cr();
  tty->print_cr("absolute:  count of compiled bytecodes of this type");
  tty->print_cr("relative:  percentage contribution to compiled nodes");
  tty->print_cr("nodes   :  Average number of nodes constructed per bytecode");
  tty->print_cr("rnodes  :  Significance towards total nodes constructed (nodes*relative)");
  tty->print_cr("transforms: Average amount of transform progress per bytecode compiled");
  tty->print_cr("values  :  Average number of node values improved per bytecode");
  tty->print_cr("name    :  Bytecode name");
  tty->cr();
  tty->print_cr("  absolute  relative   nodes  rnodes  transforms  values   name");
  tty->print_cr("----------------------------------------------------------------------");
  while (--i > 0) {
    int       abs = _bytecodes_parsed[i];
    float     rel = abs * 100.0F / total;
    float   nodes = _bytecodes_parsed[i] == 0 ? 0 : (1.0F * _nodes_constructed[i])/_bytecodes_parsed[i];
    float  rnodes = _bytecodes_parsed[i] == 0 ? 0 : rel * nodes;
    float  xforms = _bytecodes_parsed[i] == 0 ? 0 : (1.0F * _nodes_transformed[i])/_bytecodes_parsed[i];
    float  values = _bytecodes_parsed[i] == 0 ? 0 : (1.0F * _new_values       [i])/_bytecodes_parsed[i];
    if (cutoff <= rel) {
      tty->print_cr("%10d  %7.2f%%  %6.1f  %6.2f   %6.1f   %6.1f     %s", abs, rel, nodes, rnodes, xforms, values, name_for_bc(i));
      abs_sum += abs;
    }
  }
  tty->print_cr("----------------------------------------------------------------------");
  float rel_sum = abs_sum * 100.0F / total;
  tty->print_cr("%10d  %7.2f%%    (cutoff = %.2f%%)", abs_sum, rel_sum, cutoff);
  tty->print_cr("----------------------------------------------------------------------");
  tty->cr();
}
#endif

//----------------------------load_state_from----------------------------------
// Load block/map/sp.  But do not touch iter/bci.
void Parse::load_state_from(Block* block) {
  set_block(block);
  // load the block's JVM state:
  set_map(block->start_map());
  set_sp( block->start_sp());
}


//-----------------------------record_state------------------------------------
void Parse::Block::record_state(Parse* p) {
  assert(!is_merged(), "can only record state once, on 1st inflow");
  assert(start_sp() == p->sp(), "stack pointer must agree with ciTypeFlow");
  set_start_map(p->stop());
}


//------------------------------do_one_block-----------------------------------
void Parse::do_one_block() {
  if (TraceOptoParse) {
    Block *b = block();
    int ns = b->num_successors();
    int nt = b->all_successors();

    tty->print("Parsing block #%d at bci [%d,%d), successors: ",
                  block()->rpo(), block()->start(), block()->limit());
    for (int i = 0; i < nt; i++) {
      tty->print((( i < ns) ? " %d" : " %d(e)"), b->successor_at(i)->rpo());
    }
    if (b->is_loop_head()) tty->print("  lphd");
    tty->print_cr("");
  }

  assert(block()->is_merged(), "must be merged before being parsed");
  block()->mark_parsed();
  ++_blocks_parsed;

  // Set iterator to start of block.
  iter().reset_to_bci(block()->start());

  CompileLog* log = C->log();

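  // Bytecodes are parsed one at a time until the iterator reaches the
  // block limit; at the limit we merge into the successor block rather
  // than falling through, so control transfer is always explicit.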
  // Parse bytecodes
  while (!stopped() && !failing()) {
    iter().next();

    // Learn the current bci from the iterator:
    set_parse_bci(iter().cur_bci());

    if (bci() == block()->limit()) {
      // Do not walk into the next block until directed by do_all_blocks.
      merge(bci());
      break;
    }
    assert(bci() < block()->limit(), "bci still in block");

    if (log != NULL) {
      // Output an optional context marker, to help place actions
      // that occur during parsing of this BC.  If there is no log
      // output until the next context string, this context string
      // will be silently ignored.
      log->context()->reset();
      log->context()->print_cr("<bc code='%d' bci='%d'/>", (int)bc(), bci());
    }

    if (block()->has_trap_at(bci())) {
      // We must respect the flow pass's traps, because it will refuse
      // to produce successors for trapping blocks.
      int trap_index = block()->flow()->trap_index();
      assert(trap_index != 0, "trap index must be valid");
      uncommon_trap(trap_index);
      break;
    }

    NOT_PRODUCT( parse_histogram()->set_initial_state(bc()); );

#ifdef ASSERT
    int pre_bc_sp = sp();
    int inputs, depth;
    bool have_se = !stopped() && compute_stack_effects(inputs, depth);
    assert(!have_se || pre_bc_sp >= inputs, "have enough stack to execute this BC");
#endif //ASSERT

    do_one_bytecode();

    assert(!have_se || stopped() || failing() || (sp() - pre_bc_sp) == depth, "correct depth prediction");

    do_exceptions();

    NOT_PRODUCT( parse_histogram()->record_change(); );

    if (log != NULL) log->context()->reset();  // done w/ this one

    // Fall into next bytecode.  Each bytecode normally has 1 sequential
    // successor which is typically made ready by visiting this bytecode.
    // If the successor has several predecessors, then it is a merge
    // point, starts a new basic block, and is handled like other basic blocks.
  }
}


//------------------------------set_parse_bci----------------------------------
void Parse::set_parse_bci(int bci) {
  set_bci(bci);
  Node_Notes* nn = C->default_node_notes();
  if (nn == NULL)  return;

  // Collect debug info for inlined calls unless -XX:-DebugInlinedCalls.
  if (!DebugInlinedCalls && depth() > 1) {
    return;
  }

  // Update the JVMS annotation, if present.
  JVMState* jvms = nn->jvms();
  if (jvms != NULL && jvms->bci() != bci) {
    // Update the JVMS.
    jvms = jvms->clone_shallow(C);
    jvms->set_bci(bci);
    nn->set_jvms(jvms);
  }
}

//------------------------------merge------------------------------------------
// Merge the current mapping into the basic block starting at bci
void Parse::merge(int target_bci) {
  Block* target = successor_for_bci(target_bci);
  if (target == NULL) { handle_missing_successor(target_bci); return; }
  assert(!target->is_ready(), "our arrival must be expected");
  int pnum = target->next_path_num();
  merge_common(target, pnum);
}

//-------------------------merge_new_path--------------------------------------
// Merge the current mapping into the basic block, using a new path
void Parse::merge_new_path(int target_bci) {
  Block* target = successor_for_bci(target_bci);
  if (target == NULL) { handle_missing_successor(target_bci); return; }
  assert(!target->is_ready(), "new path into frozen graph");
  int pnum = target->add_new_path();
  merge_common(target, pnum);
}

//-------------------------merge_exception--------------------------------------
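// Unlike merge(), which consumes a path number the target was already
// expecting, merge_exception() (like merge_new_path above) registers a
// brand-new path into the target region.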
// Merge the current mapping into the basic block starting at bci.
// The ex_oop must be pushed on the stack, unlike throw_to_exit.
void Parse::merge_exception(int target_bci) {
  assert(sp() == 1, "must have only the throw exception on the stack");
  Block* target = successor_for_bci(target_bci);
  if (target == NULL) { handle_missing_successor(target_bci); return; }
  assert(target->is_handler(), "exceptions are handled by special blocks");
  int pnum = target->add_new_path();
  merge_common(target, pnum);
}

//--------------------handle_missing_successor---------------------------------
void Parse::handle_missing_successor(int target_bci) {
#ifndef PRODUCT
  Block* b = block();
  int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
  tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
#endif
  ShouldNotReachHere();
}

//--------------------------merge_common---------------------------------------
void Parse::merge_common(Parse::Block* target, int pnum) {
  if (TraceOptoParse) {
    tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
  }

  // Zap extra stack slots to top
  assert(sp() == target->start_sp(), "");
  clean_stack(sp());

  if (!target->is_merged()) {   // No prior mapping at this bci
    if (TraceOptoParse) { tty->print(" with empty state"); }

    // If this path is dead, do not bother capturing it as a merge.
    // It is "as if" we had 1 fewer predecessors from the beginning.
    if (stopped()) {
      if (TraceOptoParse)  tty->print_cr(", but path is dead and doesn't count");
      return;
    }

    // Record that a new block has been merged.
    ++_blocks_merged;

    // Make a region if we know there are multiple or unpredictable inputs.
    // (Also, if this is a plain fall-through, we might see another region,
    // which must not be allowed into this block's map.)
    if (pnum > PhiNode::Input         // Known multiple inputs.
        || target->is_handler()       // These have unpredictable inputs.
        || target->is_loop_head()     // Known multiple inputs
        || control()->is_Region()) {  // We must hide this guy.
      // Add a Region to start the new basic block.  Phis will be added
      // later lazily.
      int edges = target->pred_count();
      if (edges < pnum)  edges = pnum;  // might be a new path!
      Node *r = new (C, edges+1) RegionNode(edges+1);
      gvn().set_type(r, Type::CONTROL);
      record_for_igvn(r);
      // zap all inputs to NULL for debugging (done in Node(uint) constructor)
      // for (int j = 1; j < edges+1; j++) { r->init_req(j, NULL); }
      r->init_req(pnum, control());
      set_control(r);
    }

    // Convert the existing Parser mapping into a mapping at this bci.
    store_state_to(target);
    assert(target->is_merged(), "do not come here twice");

  } else {                      // Prior mapping at this bci
    if (TraceOptoParse) { tty->print(" with previous state"); }

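    // Where the prior mapping and the incoming mapping disagree on a
    // value, a phi on the region is used (or created).  For example, if
    // one path left constant 1 and another constant 2 in the same
    // local, the merged local becomes Phi(Region, 1, 2).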
    bool nophi = target->is_parsed();

    SafePointNode* newin = map();  // Hang on to incoming mapping
    Block* save_block = block();   // Hang on to incoming block
    load_state_from(target);       // Get prior mapping

    assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
    assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
    assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
    assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");

    // Iterate over my current mapping and the old mapping.
    // Where different, insert Phi functions.
    // Use any existing Phi functions.
    assert(control()->is_Region(), "must be merging to a region");
    RegionNode* r = control()->as_Region();

    // Compute where to merge into
    // Merge incoming control path
    r->init_req(pnum, newin->control());

    if (pnum == 1) {            // Last merge for this Region?
      if (!block()->flow()->is_irreducible_entry()) {
        Node* result = _gvn.transform_no_reclaim(r);
        if (r != result && TraceOptoParse) {
          tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
        }
      }
      record_for_igvn(r);
    }

    // Update all the non-control inputs to map:
    assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
    bool check_elide_phi = target->is_SEL_backedge(save_block);
    for (uint j = 1; j < newin->req(); j++) {
      Node* m = map()->in(j);   // Current state of target.
      Node* n = newin->in(j);   // Incoming change to target state.
      PhiNode* phi;
      if (m->is_Phi() && m->as_Phi()->region() == r)
        phi = m->as_Phi();
      else
        phi = NULL;
      if (m != n) {             // Different; must merge
        switch (j) {
        // Frame pointer and Return Address never change
        case TypeFunc::FramePtr:  // Drop m, use the original value
        case TypeFunc::ReturnAdr:
          break;
        case TypeFunc::Memory:    // Merge inputs to the MergeMem node
          assert(phi == NULL, "the merge contains phis, not vice versa");
          merge_memory_edges(n->as_MergeMem(), pnum, nophi);
          continue;
        default:                  // All normal stuff
          if (phi == NULL) {
            if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
              phi = ensure_phi(j, nophi);
            }
          }
          break;
        }
      }
      // At this point, n might be top if:
      //  - there is no phi (because TypeFlow detected a conflict), or
      //  - the corresponding control edge is top (a dead incoming path).
      // It is a bug if we create a phi which sees a garbage value on a live path.

      if (phi != NULL) {
        assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
        assert(phi->region() == r, "");
        phi->set_req(pnum, n);  // Then add 'n' to the merge
        if (pnum == PhiNode::Input) {
          // Last merge for this Phi.
          // So far, Phis have had a reasonable type from ciTypeFlow.
          // Now _gvn will join that with the meet of current inputs.
          // BOTTOM is never permissible here, because pessimistically
          // Phis of pointers cannot lose the basic pointer type.
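          // (For example, joining two unrelated instance pointers may widen
          // toward TypeInstPtr::BOTTOM, but never all the way to Type::BOTTOM.)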
          debug_only(const Type* bt1 = phi->bottom_type());
          assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
          map()->set_req(j, _gvn.transform_no_reclaim(phi));
          debug_only(const Type* bt2 = phi->bottom_type());
          assert(bt2->higher_equal(bt1), "must be consistent with type-flow");
          record_for_igvn(phi);
        }
      }
    } // End of for all values to be merged

    if (pnum == PhiNode::Input &&
        !r->in(0)) {            // The occasional useless Region
      assert(control() == r, "");
      set_control(r->nonnull_req());
    }

    // newin has been subsumed into the lazy merge, and is now dead.
    set_block(save_block);

    stop();                     // done with this guy, for now
  }

  if (TraceOptoParse) {
    tty->print_cr(" on path %d", pnum);
  }

  // Done with this parser state.
  assert(stopped(), "");
}


//--------------------------merge_memory_edges-----------------------------------
void Parse::merge_memory_edges(MergeMemNode* n, int pnum, bool nophi) {
  // (nophi means we must not create phis, because we already parsed here)
  assert(n != NULL, "");
  // Merge the inputs to the MergeMems
  MergeMemNode* m = merged_memory();

  assert(control()->is_Region(), "must be merging to a region");
  RegionNode* r = control()->as_Region();

  PhiNode* base = NULL;
  MergeMemNode* remerge = NULL;
  for (MergeMemStream mms(m, n); mms.next_non_empty2(); ) {
    Node *p = mms.force_memory();
    Node *q = mms.memory2();
    if (mms.is_empty() && nophi) {
      // Trouble:  No new splits allowed after a loop body is parsed.
      // Instead, wire the new split into a MergeMem on the backedge.
      // The optimizer will sort it out, slicing the phi.
      if (remerge == NULL) {
        assert(base != NULL, "");
        assert(base->in(0) != NULL, "should not be xformed away");
        remerge = MergeMemNode::make(C, base->in(pnum));
        gvn().set_type(remerge, Type::MEMORY);
        base->set_req(pnum, remerge);
      }
      remerge->set_memory_at(mms.alias_idx(), q);
      continue;
    }
    assert(!q->is_MergeMem(), "");
    PhiNode* phi;
    if (p != q) {
      phi = ensure_memory_phi(mms.alias_idx(), nophi);
    } else {
      if (p->is_Phi() && p->as_Phi()->region() == r)
        phi = p->as_Phi();
      else
        phi = NULL;
    }
    // Insert q into local phi
    if (phi != NULL) {
      assert(phi->region() == r, "");
      p = phi;
      phi->set_req(pnum, q);
      if (mms.at_base_memory()) {
        base = phi;  // delay transforming it
      } else if (pnum == 1) {
        record_for_igvn(phi);
        p = _gvn.transform_no_reclaim(phi);
      }
      mms.set_memory(p);  // store back through the iterator
    }
  }
  // Transform base last, in case we must fiddle with remerging.
  if (base != NULL && pnum == 1) {
    record_for_igvn(base);
    m->set_base_memory( _gvn.transform_no_reclaim(base) );
  }
}
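// (Illustrative: memory is tracked as per-alias-class slices; the loop above
// builds at most one memory Phi per slice that actually differs between the
// incoming paths, leaving identical slices untouched.)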
//------------------------ensure_phis_everywhere---------------------------------
void Parse::ensure_phis_everywhere() {
  ensure_phi(TypeFunc::I_O);

  // Ensure a phi on all currently known memories.
  for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
    ensure_memory_phi(mms.alias_idx());
    debug_only(mms.set_memory());  // keep the iterator happy
  }

  // Note:  This is our only chance to create phis for memory slices.
  // If we miss a slice that crops up later, it will have to be
  // merged into the base-memory phi that we are building here.
  // Later, the optimizer will comb out the knot, and build separate
  // phi-loops for each memory slice that matters.

  // Monitors must nest nicely and not get confused amongst themselves.
  // Phi-ify everything up to the monitors, though.
  uint monoff = map()->jvms()->monoff();
  uint nof_monitors = map()->jvms()->nof_monitors();

  assert(TypeFunc::Parms == map()->jvms()->locoff(), "parser map should contain only youngest jvms");
  bool check_elide_phi = block()->is_SEL_head();
  for (uint i = TypeFunc::Parms; i < monoff; i++) {
    if (!check_elide_phi || !block()->can_elide_SEL_phi(i)) {
      ensure_phi(i);
    }
  }

  // Even monitors need Phis, though they are well-structured.
  // This is true for OSR methods, and also for the rare cases where
  // a monitor object is the subject of a replace_in_map operation.
  // See bugs 4426707 and 5043395.
  for (uint m = 0; m < nof_monitors; m++) {
    ensure_phi(map()->jvms()->monitor_obj_offset(m));
  }
}
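// (Used at loop heads before their bodies are parsed, so that values merged
// over a backedge always find an existing Phi to land in; cf. the nocreate
// asserts in ensure_phi and ensure_memory_phi below.)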
//-----------------------------add_new_path--------------------------------------
// Add a previously unaccounted predecessor to this block.
int Parse::Block::add_new_path() {
  // If there is no map, return the lowest unused path number.
  if (!is_merged())  return pred_count()+1;  // there will be a map shortly

  SafePointNode* map = start_map();
  if (!map->control()->is_Region())
    return pred_count()+1;  // there may be a region some day
  RegionNode* r = map->control()->as_Region();

  // Add new path to the region.
  uint pnum = r->req();
  r->add_req(NULL);

  for (uint i = 1; i < map->req(); i++) {
    Node* n = map->in(i);
    if (i == TypeFunc::Memory) {
      // Ensure a phi on all currently known memories.
      for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
        Node* phi = mms.memory();
        if (phi->is_Phi() && phi->as_Phi()->region() == r) {
          assert(phi->req() == pnum, "must be same size as region");
          phi->add_req(NULL);
        }
      }
    } else {
      if (n->is_Phi() && n->as_Phi()->region() == r) {
        assert(n->req() == pnum, "must be same size as region");
        n->add_req(NULL);
      }
    }
  }

  return pnum;
}

//------------------------------ensure_phi---------------------------------------
// Turn the idx'th entry of the current map into a Phi
PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
  SafePointNode* map = this->map();
  Node* region = map->control();
  assert(region->is_Region(), "");

  Node* o = map->in(idx);
  assert(o != NULL, "");

  if (o == top())  return NULL;  // TOP always merges into TOP

  if (o->is_Phi() && o->as_Phi()->region() == region) {
    return o->as_Phi();
  }

  // Now use a Phi here for merging
  assert(!nocreate, "Cannot build a phi for a block already parsed.");
  const JVMState* jvms = map->jvms();
  const Type* t;
  if (jvms->is_loc(idx)) {
    t = block()->local_type_at(idx - jvms->locoff());
  } else if (jvms->is_stk(idx)) {
    t = block()->stack_type_at(idx - jvms->stkoff());
  } else if (jvms->is_mon(idx)) {
    assert(!jvms->is_monitor_box(idx), "no phis for boxes");
    t = TypeInstPtr::BOTTOM;  // this is sufficient for a lock object
  } else if ((uint)idx < TypeFunc::Parms) {
    t = o->bottom_type();  // Type::RETURN_ADDRESS or such-like.
  } else {
    assert(false, "no type information for this phi");
    t = Type::TOP;  // defensive: unreachable, but keeps 't' initialized in product builds
  }

  // If the type falls to bottom, then this must be a local that
  // is mixing ints and oops or some such.  Forcing it to top
  // makes it go dead.
  if (t == Type::BOTTOM) {
    map->set_req(idx, top());
    return NULL;
  }

  // Do not create phis for top either.
  // A top on a non-null control path must be unused, even after the phi.
  if (t == Type::TOP || t == Type::HALF) {
    map->set_req(idx, top());
    return NULL;
  }

  PhiNode* phi = PhiNode::make(region, o, t);
  gvn().set_type(phi, t);
  if (C->do_escape_analysis()) record_for_igvn(phi);
  map->set_req(idx, phi);
  return phi;
}
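// (Illustrative: if one path leaves ConI(1) and another leaves ConI(2) in the
// same local slot, ensure_phi replaces the slot with a Phi whose type comes
// from ciTypeFlow, here an int type, and merge_common fills in its inputs.)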
//--------------------------ensure_memory_phi------------------------------------
// Turn the idx'th slice of the current memory into a Phi
PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
  MergeMemNode* mem = merged_memory();
  Node* region = control();
  assert(region->is_Region(), "");

  Node *o = (idx == Compile::AliasIdxBot) ? mem->base_memory() : mem->memory_at(idx);
  assert(o != NULL && o != top(), "");

  PhiNode* phi;
  if (o->is_Phi() && o->as_Phi()->region() == region) {
    phi = o->as_Phi();
    if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
      // clone the shared base memory phi to make a new memory split
      assert(!nocreate, "Cannot build a phi for a block already parsed.");
      const Type* t = phi->bottom_type();
      const TypePtr* adr_type = C->get_adr_type(idx);
      phi = phi->slice_memory(adr_type);
      gvn().set_type(phi, t);
    }
    return phi;
  }

  // Now use a Phi here for merging
  assert(!nocreate, "Cannot build a phi for a block already parsed.");
  const Type* t = o->bottom_type();
  const TypePtr* adr_type = C->get_adr_type(idx);
  phi = PhiNode::make(region, o, t, adr_type);
  gvn().set_type(phi, t);
  if (idx == Compile::AliasIdxBot)
    mem->set_base_memory(phi);
  else
    mem->set_memory_at(idx, phi);
  return phi;
}

//------------------------------call_register_finalizer--------------------------
// Check the klass of the receiver and call register_finalizer if the
// class needs finalization.
void Parse::call_register_finalizer() {
  Node* receiver = local(0);
  assert(receiver != NULL && receiver->bottom_type()->isa_instptr() != NULL,
         "must have non-null instance type");

  const TypeInstPtr *tinst = receiver->bottom_type()->isa_instptr();
  if (tinst != NULL && tinst->klass()->is_loaded() && !tinst->klass_is_exact()) {
    // The type isn't known exactly, so see if CHA tells us anything.
    ciInstanceKlass* ik = tinst->klass()->as_instance_klass();
    if (!Dependencies::has_finalizable_subclass(ik)) {
      // No finalizable subclasses, so skip the dynamic check.
      C->dependencies()->assert_has_no_finalizable_subclasses(ik);
      return;
    }
  }

  // Insert a dynamic test for whether the instance needs
  // finalization.  In general this will fold up, since the concrete
  // class is often visible, so the access flags are constant.
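  // The graph built below is equivalent to this sketch (illustrative only):
  //
  //   if (receiver->klass()->access_flags() & JVM_ACC_HAS_FINALIZER) {
  //     runtime_call(register_finalizer, receiver);   // slow path, may throw
  //   }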
  Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() );
  Node* klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS) );

  Node* access_flags_addr = basic_plus_adr(klass, klass, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc));
  Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT);

  Node* mask  = _gvn.transform(new (C, 3) AndINode(access_flags, intcon(JVM_ACC_HAS_FINALIZER)));
  Node* check = _gvn.transform(new (C, 3) CmpINode(mask, intcon(0)));
  Node* test  = _gvn.transform(new (C, 2) BoolNode(check, BoolTest::ne));

  IfNode* iff = create_and_map_if(control(), test, PROB_MAX, COUNT_UNKNOWN);

  RegionNode* result_rgn = new (C, 3) RegionNode(3);
  record_for_igvn(result_rgn);

  Node *skip_register = _gvn.transform(new (C, 1) IfFalseNode(iff));
  result_rgn->init_req(1, skip_register);

  Node *needs_register = _gvn.transform(new (C, 1) IfTrueNode(iff));
  set_control(needs_register);
  if (stopped()) {
    // There is no slow path.
    result_rgn->init_req(2, top());
  } else {
    Node *call = make_runtime_call(RC_NO_LEAF,
                                   OptoRuntime::register_finalizer_Type(),
                                   OptoRuntime::register_finalizer_Java(),
                                   NULL, TypePtr::BOTTOM,
                                   receiver);
    make_slow_call_ex(call, env()->Throwable_klass(), true);

    Node* fast_io  = call->in(TypeFunc::I_O);
    Node* fast_mem = call->in(TypeFunc::Memory);
    // These two phis are pre-filled with copies of the fast IO and Memory
    Node* io_phi  = PhiNode::make(result_rgn, fast_io,  Type::ABIO);
    Node* mem_phi = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);

    result_rgn->init_req(2, control());
    io_phi    ->init_req(2, i_o());
    mem_phi   ->init_req(2, reset_memory());

    set_all_memory( _gvn.transform(mem_phi) );
    set_i_o(        _gvn.transform(io_phi) );
  }

  set_control( _gvn.transform(result_rgn) );
}

//------------------------------return_current-----------------------------------
// Append current _map to _exit_return
void Parse::return_current(Node* value) {
  if (RegisterFinalizersAtInit &&
      method()->intrinsic_id() == vmIntrinsics::_Object_init) {
    call_register_finalizer();
  }

  // Do not set_parse_bci, so that return goo is credited to the return insn.
  set_bci(InvocationEntryBci);
  if (method()->is_synchronized() && GenerateSynchronizationCode) {
    shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
  }
  if (C->env()->dtrace_method_probes()) {
    make_dtrace_method_exit(method());
  }
  SafePointNode* exit_return = _exits.map();
  exit_return->in( TypeFunc::Control )->add_req( control() );
  exit_return->in( TypeFunc::I_O     )->add_req( i_o()     );
  Node *mem = exit_return->in( TypeFunc::Memory );
  for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
    if (mms.is_empty()) {
      // get a copy of the base memory, and patch just this one input
      const TypePtr* adr_type = mms.adr_type(C);
      Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
      assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
      gvn().set_type_bottom(phi);
      phi->del_req(phi->req()-1);  // prepare to re-patch
      mms.set_memory(phi);
    }
    mms.memory()->add_req(mms.memory2());
  }

  // frame pointer is always same, already captured
  if (value != NULL) {
    // If returning oops to an interface-return, there is a silent free
    // cast from oop to interface allowed by the Verifier.  Make it explicit
    // here.
    Node* phi = _exits.argument(0);
    const TypeInstPtr *tr = phi->bottom_type()->isa_instptr();
    if( tr && tr->klass()->is_loaded() &&
        tr->klass()->is_interface() ) {
      const TypeInstPtr *tp = value->bottom_type()->isa_instptr();
      if (tp && tp->klass()->is_loaded() &&
          !tp->klass()->is_interface()) {
        // sharpen the type eagerly; this eases certain assert checking
        if (tp->higher_equal(TypeInstPtr::NOTNULL))
          tr = tr->join(TypeInstPtr::NOTNULL)->is_instptr();
        value = _gvn.transform(new (C, 2) CheckCastPPNode(0, value, tr));
      }
    }
    phi->add_req(value);
  }

  stop_and_kill_map();  // This CFG path dies here
}
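// (Each return site lands in return_current exactly once; the exit map above
// accumulates one control, I/O, memory, and return-value edge per site, and
// do_exits() later merges them into the method's single exit.)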
//------------------------------add_safepoint------------------------------------
void Parse::add_safepoint() {
  // See if we can avoid this safepoint.  No need for a SafePoint immediately
  // after a Call (except Leaf Call) or another SafePoint.
  Node *proj = control();
  bool add_poll_param = SafePointNode::needs_polling_address_input();
  uint parms = add_poll_param ? TypeFunc::Parms+1 : TypeFunc::Parms;
  if( proj->is_Proj() ) {
    Node *n0 = proj->in(0);
    if( n0->is_Catch() ) {
      n0 = n0->in(0)->in(0);
      assert( n0->is_Call(), "expect a call here" );
    }
    if( n0->is_Call() ) {
      if( n0->as_Call()->guaranteed_safepoint() )
        return;
    } else if( n0->is_SafePoint() && n0->req() >= parms ) {
      return;
    }
  }

  // Clear out dead values from the debug info.
  kill_dead_locals();

  // Clone the JVM State
  SafePointNode *sfpnt = new (C, parms) SafePointNode(parms, NULL);

  // Capture memory state BEFORE a SafePoint.  Since we can block at a
  // SafePoint we need our GC state to be safe; i.e. we need all our current
  // write barriers (card marks) to not float down after the SafePoint so we
  // must read raw memory.  Likewise we need all oop stores to match the card
  // marks.  If deopt can happen, we need ALL stores (we need the correct JVM
  // state on a deopt).

  // We do not need to WRITE the memory state after a SafePoint.  The control
  // edge will keep card-marks and oop-stores from floating up from below a
  // SafePoint and our true dependency added here will keep them from floating
  // down below a SafePoint.

  // Clone the current memory state
  Node* mem = MergeMemNode::make(C, map()->memory());

  mem = _gvn.transform(mem);

  // Pass control through the safepoint
  sfpnt->init_req(TypeFunc::Control  , control());
  // Fix edges normally used by a call
  sfpnt->init_req(TypeFunc::I_O      , top() );
  sfpnt->init_req(TypeFunc::Memory   , mem   );
  sfpnt->init_req(TypeFunc::ReturnAdr, top() );
  sfpnt->init_req(TypeFunc::FramePtr , top() );

  // Create a node for the polling address
  if( add_poll_param ) {
    Node *polladr = ConPNode::make(C, (address)os::get_polling_page());
    sfpnt->init_req(TypeFunc::Parms+0, _gvn.transform(polladr));
  }

  // Fix up the JVM State edges
  add_safepoint_edges(sfpnt);
  Node *transformed_sfpnt = _gvn.transform(sfpnt);
  set_control(transformed_sfpnt);

  // Provide an edge from root to safepoint.  This makes the safepoint
  // appear useful until the parse has completed.
  if( OptoRemoveUseless && transformed_sfpnt->is_SafePoint() ) {
    assert(C->root() != NULL, "Expect parse is still valid");
    C->root()->add_prec(transformed_sfpnt);
  }
}
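// (Example of the early exit in add_safepoint: a loop backedge that
// immediately follows an invokevirtual needs no new SafePoint, since the
// call node is itself a guaranteed safepoint.)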
#ifndef PRODUCT
//------------------------show_parse_info----------------------------------------
void Parse::show_parse_info() {
  InlineTree* ilt = NULL;
  if (C->ilt() != NULL) {
    JVMState* caller_jvms = is_osr_parse() ? caller()->caller() : caller();
    ilt = InlineTree::find_subtree_from_root(C->ilt(), caller_jvms, method());
  }
  if (PrintCompilation && Verbose) {
    if (depth() == 1) {
      if( ilt->count_inlines() ) {
        tty->print(" __inlined %d (%d bytes)", ilt->count_inlines(),
                   ilt->count_inline_bcs());
        tty->cr();
      }
    } else {
      if (method()->is_synchronized())         tty->print("s");
      if (method()->has_exception_handlers())  tty->print("!");
      // Check this is not the final compiled version
      if (C->trap_can_recompile()) {
        tty->print("-");
      } else {
        tty->print(" ");
      }
      method()->print_short_name();
      if (is_osr_parse()) {
        tty->print(" @ %d", osr_bci());
      }
      tty->print(" (%d bytes)", method()->code_size());
      if (ilt->count_inlines()) {
        tty->print(" __inlined %d (%d bytes)", ilt->count_inlines(),
                   ilt->count_inline_bcs());
      }
      tty->cr();
    }
  }
  if (PrintOpto && (depth() == 1 || PrintOptoInlining)) {
    // Print that we succeeded; suppress this message on the first osr parse.

    if (method()->is_synchronized())         tty->print("s");
    if (method()->has_exception_handlers())  tty->print("!");
    // Check this is not the final compiled version
    if (C->trap_can_recompile() && depth() == 1) {
      tty->print("-");
    } else {
      tty->print(" ");
    }
    if( depth() != 1 ) { tty->print(" "); }  // missing compile count
    for (int i = 1; i < depth(); ++i) { tty->print(" "); }
    method()->print_short_name();
    if (is_osr_parse()) {
      tty->print(" @ %d", osr_bci());
    }
    if (ilt->caller_bci() != -1) {
      tty->print(" @ %d", ilt->caller_bci());
    }
    tty->print(" (%d bytes)", method()->code_size());
    if (ilt->count_inlines()) {
      tty->print(" __inlined %d (%d bytes)", ilt->count_inlines(),
                 ilt->count_inline_bcs());
    }
    tty->cr();
  }
}


//------------------------------dump----------------------------------------------
// Dump information associated with the bytecodes of current _method
void Parse::dump() {
  if( method() != NULL ) {
    // Iterate over bytecodes
    ciBytecodeStream iter(method());
    for( Bytecodes::Code bc = iter.next(); bc != ciBytecodeStream::EOBC(); bc = iter.next() ) {
      dump_bci( iter.cur_bci() );
      tty->cr();
    }
  }
}

// Dump information associated with a byte code index, 'bci'
void Parse::dump_bci(int bci) {
  // Output info on merge-points, cloning, and within _jsr..._ret
  // NYI
  tty->print(" bci:%d", bci);
}

#endif