1 /* 2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "ci/bcEscapeAnalyzer.hpp" 27 #include "compiler/oopMap.hpp" 28 #include "opto/callGenerator.hpp" 29 #include "opto/callnode.hpp" 30 #include "opto/escape.hpp" 31 #include "opto/locknode.hpp" 32 #include "opto/machnode.hpp" 33 #include "opto/matcher.hpp" 34 #include "opto/parse.hpp" 35 #include "opto/regalloc.hpp" 36 #include "opto/regmask.hpp" 37 #include "opto/rootnode.hpp" 38 #include "opto/runtime.hpp" 39 40 // Portions of code courtesy of Clifford Click 41 42 // Optimization - Graph Style 43 44 //============================================================================= 45 uint StartNode::size_of() const { return sizeof(*this); } 46 uint StartNode::cmp( const Node &n ) const 47 { return _domain == ((StartNode&)n)._domain; } 48 const Type *StartNode::bottom_type() const { return _domain; } 49 const Type *StartNode::Value(PhaseTransform *phase) const { return _domain; } 50 #ifndef PRODUCT 51 void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);} 52 #endif 53 54 //------------------------------Ideal------------------------------------------ 55 Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){ 56 return remove_dead_region(phase, can_reshape) ? 
this : NULL; 57 } 58 59 //------------------------------calling_convention----------------------------- 60 void StartNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const { 61 Matcher::calling_convention( sig_bt, parm_regs, argcnt, false ); 62 } 63 64 //------------------------------Registers-------------------------------------- 65 const RegMask &StartNode::in_RegMask(uint) const { 66 return RegMask::Empty; 67 } 68 69 //------------------------------match------------------------------------------ 70 // Construct projections for incoming parameters, and their RegMask info 71 Node *StartNode::match( const ProjNode *proj, const Matcher *match ) { 72 switch (proj->_con) { 73 case TypeFunc::Control: 74 case TypeFunc::I_O: 75 case TypeFunc::Memory: 76 return new (match->C) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj); 77 case TypeFunc::FramePtr: 78 return new (match->C) MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP); 79 case TypeFunc::ReturnAdr: 80 return new (match->C) MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP); 81 case TypeFunc::Parms: 82 default: { 83 uint parm_num = proj->_con - TypeFunc::Parms; 84 const Type *t = _domain->field_at(proj->_con); 85 if (t->base() == Type::Half) // 2nd half of Longs and Doubles 86 return new (match->C) ConNode(Type::TOP); 87 uint ideal_reg = t->ideal_reg(); 88 RegMask &rm = match->_calling_convention_mask[parm_num]; 89 return new (match->C) MachProjNode(this,proj->_con,rm,ideal_reg); 90 } 91 } 92 return NULL; 93 } 94 95 //------------------------------StartOSRNode---------------------------------- 96 // The method start node for an on stack replacement adapter 97 98 //------------------------------osr_domain----------------------------- 99 const TypeTuple *StartOSRNode::osr_domain() { 100 const Type **fields = TypeTuple::fields(2); 101 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // address of osr buffer 102 103 return TypeTuple::make(TypeFunc::Parms+1, fields); 104 } 105 106 //============================================================================= 107 const char * const ParmNode::names[TypeFunc::Parms+1] = { 108 "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms" 109 }; 110 111 #ifndef PRODUCT 112 void ParmNode::dump_spec(outputStream *st) const { 113 if( _con < TypeFunc::Parms ) { 114 st->print(names[_con]); 115 } else { 116 st->print("Parm%d: ",_con-TypeFunc::Parms); 117 // Verbose and WizardMode dump bottom_type for all nodes 118 if( !Verbose && !WizardMode ) bottom_type()->dump_on(st); 119 } 120 } 121 #endif 122 123 uint ParmNode::ideal_reg() const { 124 switch( _con ) { 125 case TypeFunc::Control : // fall through 126 case TypeFunc::I_O : // fall through 127 case TypeFunc::Memory : return 0; 128 case TypeFunc::FramePtr : // fall through 129 case TypeFunc::ReturnAdr: return Op_RegP; 130 default : assert( _con > TypeFunc::Parms, "" ); 131 // fall through 132 case TypeFunc::Parms : { 133 // Type of argument being passed 134 const Type *t = in(0)->as_Start()->_domain->field_at(_con); 135 return t->ideal_reg(); 136 } 137 } 138 ShouldNotReachHere(); 139 return 0; 140 } 141 142 //============================================================================= 143 ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) { 144 init_req(TypeFunc::Control,cntrl); 145 init_req(TypeFunc::I_O,i_o); 146 init_req(TypeFunc::Memory,memory); 147 init_req(TypeFunc::FramePtr,frameptr); 148 
init_req(TypeFunc::ReturnAdr,retadr); 149 } 150 151 Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){ 152 return remove_dead_region(phase, can_reshape) ? this : NULL; 153 } 154 155 const Type *ReturnNode::Value( PhaseTransform *phase ) const { 156 return ( phase->type(in(TypeFunc::Control)) == Type::TOP) 157 ? Type::TOP 158 : Type::BOTTOM; 159 } 160 161 // Do we Match on this edge index or not? No edges on return nodes 162 uint ReturnNode::match_edge(uint idx) const { 163 return 0; 164 } 165 166 167 #ifndef PRODUCT 168 void ReturnNode::dump_req() const { 169 // Dump the required inputs, enclosed in '(' and ')' 170 uint i; // Exit value of loop 171 for( i=0; i<req(); i++ ) { // For all required inputs 172 if( i == TypeFunc::Parms ) tty->print("returns"); 173 if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx); 174 else tty->print("_ "); 175 } 176 } 177 #endif 178 179 //============================================================================= 180 RethrowNode::RethrowNode( 181 Node* cntrl, 182 Node* i_o, 183 Node* memory, 184 Node* frameptr, 185 Node* ret_adr, 186 Node* exception 187 ) : Node(TypeFunc::Parms + 1) { 188 init_req(TypeFunc::Control , cntrl ); 189 init_req(TypeFunc::I_O , i_o ); 190 init_req(TypeFunc::Memory , memory ); 191 init_req(TypeFunc::FramePtr , frameptr ); 192 init_req(TypeFunc::ReturnAdr, ret_adr); 193 init_req(TypeFunc::Parms , exception); 194 } 195 196 Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){ 197 return remove_dead_region(phase, can_reshape) ? this : NULL; 198 } 199 200 const Type *RethrowNode::Value( PhaseTransform *phase ) const { 201 return (phase->type(in(TypeFunc::Control)) == Type::TOP) 202 ? Type::TOP 203 : Type::BOTTOM; 204 } 205 206 uint RethrowNode::match_edge(uint idx) const { 207 return 0; 208 } 209 210 #ifndef PRODUCT 211 void RethrowNode::dump_req() const { 212 // Dump the required inputs, enclosed in '(' and ')' 213 uint i; // Exit value of loop 214 for( i=0; i<req(); i++ ) { // For all required inputs 215 if( i == TypeFunc::Parms ) tty->print("exception"); 216 if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx); 217 else tty->print("_ "); 218 } 219 } 220 #endif 221 222 //============================================================================= 223 // Do we Match on this edge index or not? Match only target address & method 224 uint TailCallNode::match_edge(uint idx) const { 225 return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1; 226 } 227 228 //============================================================================= 229 // Do we Match on this edge index or not? Match only target address & oop 230 uint TailJumpNode::match_edge(uint idx) const { 231 return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1; 232 } 233 234 //============================================================================= 235 JVMState::JVMState(ciMethod* method, JVMState* caller) : 236 _method(method) { 237 assert(method != NULL, "must be valid call site"); 238 _reexecute = Reexecute_Undefined; 239 debug_only(_bci = -99); // random garbage value 240 debug_only(_map = (SafePointNode*)-1); 241 _caller = caller; 242 _depth = 1 + (caller == NULL ? 
0 : caller->depth()); 243 _locoff = TypeFunc::Parms; 244 _stkoff = _locoff + _method->max_locals(); 245 _monoff = _stkoff + _method->max_stack(); 246 _scloff = _monoff; 247 _endoff = _monoff; 248 _sp = 0; 249 } 250 JVMState::JVMState(int stack_size) : 251 _method(NULL) { 252 _bci = InvocationEntryBci; 253 _reexecute = Reexecute_Undefined; 254 debug_only(_map = (SafePointNode*)-1); 255 _caller = NULL; 256 _depth = 1; 257 _locoff = TypeFunc::Parms; 258 _stkoff = _locoff; 259 _monoff = _stkoff + stack_size; 260 _scloff = _monoff; 261 _endoff = _monoff; 262 _sp = 0; 263 } 264 265 //--------------------------------of_depth------------------------------------- 266 JVMState* JVMState::of_depth(int d) const { 267 const JVMState* jvmp = this; 268 assert(0 < d && (uint)d <= depth(), "oob"); 269 for (int skip = depth() - d; skip > 0; skip--) { 270 jvmp = jvmp->caller(); 271 } 272 assert(jvmp->depth() == (uint)d, "found the right one"); 273 return (JVMState*)jvmp; 274 } 275 276 //-----------------------------same_calls_as----------------------------------- 277 bool JVMState::same_calls_as(const JVMState* that) const { 278 if (this == that) return true; 279 if (this->depth() != that->depth()) return false; 280 const JVMState* p = this; 281 const JVMState* q = that; 282 for (;;) { 283 if (p->_method != q->_method) return false; 284 if (p->_method == NULL) return true; // bci is irrelevant 285 if (p->_bci != q->_bci) return false; 286 if (p->_reexecute != q->_reexecute) return false; 287 p = p->caller(); 288 q = q->caller(); 289 if (p == q) return true; 290 assert(p != NULL && q != NULL, "depth check ensures we don't run off end"); 291 } 292 } 293 294 //------------------------------debug_start------------------------------------ 295 uint JVMState::debug_start() const { 296 debug_only(JVMState* jvmroot = of_depth(1)); 297 assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last"); 298 return of_depth(1)->locoff(); 299 } 300 301 //-------------------------------debug_end------------------------------------- 302 uint JVMState::debug_end() const { 303 debug_only(JVMState* jvmroot = of_depth(1)); 304 assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last"); 305 return endoff(); 306 } 307 308 //------------------------------debug_depth------------------------------------ 309 uint JVMState::debug_depth() const { 310 uint total = 0; 311 for (const JVMState* jvmp = this; jvmp != NULL; jvmp = jvmp->caller()) { 312 total += jvmp->debug_size(); 313 } 314 return total; 315 } 316 317 #ifndef PRODUCT 318 319 //------------------------------format_helper---------------------------------- 320 // Given an allocation (a Chaitin object) and a Node decide if the Node carries 321 // any defined value or not. If it does, print out the register or constant. 322 static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) { 323 if (n == NULL) { st->print(" NULL"); return; } 324 if (n->is_SafePointScalarObject()) { 325 // Scalar replacement. 
326 SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject(); 327 scobjs->append_if_missing(spobj); 328 int sco_n = scobjs->find(spobj); 329 assert(sco_n >= 0, ""); 330 st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n); 331 return; 332 } 333 if( OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined 334 char buf[50]; 335 regalloc->dump_register(n,buf); 336 st->print(" %s%d]=%s",msg,i,buf); 337 } else { // No register, but might be constant 338 const Type *t = n->bottom_type(); 339 switch (t->base()) { 340 case Type::Int: 341 st->print(" %s%d]=#"INT32_FORMAT,msg,i,t->is_int()->get_con()); 342 break; 343 case Type::AnyPtr: 344 assert( t == TypePtr::NULL_PTR, "" ); 345 st->print(" %s%d]=#NULL",msg,i); 346 break; 347 case Type::AryPtr: 348 case Type::InstPtr: 349 st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->isa_oopptr()->const_oop()); 350 break; 351 case Type::KlassPtr: 352 st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->make_ptr()->isa_klassptr()->klass()); 353 break; 354 case Type::MetadataPtr: 355 st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->make_ptr()->isa_metadataptr()->metadata()); 356 break; 357 case Type::NarrowOop: 358 st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->make_ptr()->isa_oopptr()->const_oop()); 359 break; 360 case Type::RawPtr: 361 st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,t->is_rawptr()); 362 break; 363 case Type::DoubleCon: 364 st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d); 365 break; 366 case Type::FloatCon: 367 st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f); 368 break; 369 case Type::Long: 370 st->print(" %s%d]=#"INT64_FORMAT,msg,i,t->is_long()->get_con()); 371 break; 372 case Type::Half: 373 case Type::Top: 374 st->print(" %s%d]=_",msg,i); 375 break; 376 default: ShouldNotReachHere(); 377 } 378 } 379 } 380 381 //------------------------------format----------------------------------------- 382 void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const { 383 st->print(" #"); 384 if( _method ) { 385 _method->print_short_name(st); 386 st->print(" @ bci:%d ",_bci); 387 } else { 388 st->print_cr(" runtime stub "); 389 return; 390 } 391 if (n->is_MachSafePoint()) { 392 GrowableArray<SafePointScalarObjectNode*> scobjs; 393 MachSafePointNode *mcall = n->as_MachSafePoint(); 394 uint i; 395 // Print locals 396 for( i = 0; i < (uint)loc_size(); i++ ) 397 format_helper( regalloc, st, mcall->local(this, i), "L[", i, &scobjs ); 398 // Print stack 399 for (i = 0; i < (uint)stk_size(); i++) { 400 if ((uint)(_stkoff + i) >= mcall->len()) 401 st->print(" oob "); 402 else 403 format_helper( regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs ); 404 } 405 for (i = 0; (int)i < nof_monitors(); i++) { 406 Node *box = mcall->monitor_box(this, i); 407 Node *obj = mcall->monitor_obj(this, i); 408 if ( OptoReg::is_valid(regalloc->get_reg_first(box)) ) { 409 box = BoxLockNode::box_node(box); 410 format_helper( regalloc, st, box, "MON-BOX[", i, &scobjs ); 411 } else { 412 OptoReg::Name box_reg = BoxLockNode::reg(box); 413 st->print(" MON-BOX%d=%s+%d", 414 i, 415 OptoReg::regname(OptoReg::c_frame_pointer), 416 regalloc->reg2offset(box_reg)); 417 } 418 const char* obj_msg = "MON-OBJ["; 419 if (EliminateLocks) { 420 if (BoxLockNode::box_node(box)->is_eliminated()) 421 obj_msg = "MON-OBJ(LOCK ELIMINATED)["; 422 } 423 format_helper( regalloc, st, obj, obj_msg, i, &scobjs ); 424 } 425 426 for (i = 0; i < (uint)scobjs.length(); i++) { 427 // Scalar replaced objects. 
428 st->print_cr(""); 429 st->print(" # ScObj" INT32_FORMAT " ", i); 430 SafePointScalarObjectNode* spobj = scobjs.at(i); 431 ciKlass* cik = spobj->bottom_type()->is_oopptr()->klass(); 432 assert(cik->is_instance_klass() || 433 cik->is_array_klass(), "Not supported allocation."); 434 ciInstanceKlass *iklass = NULL; 435 if (cik->is_instance_klass()) { 436 cik->print_name_on(st); 437 iklass = cik->as_instance_klass(); 438 } else if (cik->is_type_array_klass()) { 439 cik->as_array_klass()->base_element_type()->print_name_on(st); 440 st->print("[%d]", spobj->n_fields()); 441 } else if (cik->is_obj_array_klass()) { 442 ciKlass* cie = cik->as_obj_array_klass()->base_element_klass(); 443 if (cie->is_instance_klass()) { 444 cie->print_name_on(st); 445 } else if (cie->is_type_array_klass()) { 446 cie->as_array_klass()->base_element_type()->print_name_on(st); 447 } else { 448 ShouldNotReachHere(); 449 } 450 st->print("[%d]", spobj->n_fields()); 451 int ndim = cik->as_array_klass()->dimension() - 1; 452 while (ndim-- > 0) { 453 st->print("[]"); 454 } 455 } 456 st->print("={"); 457 uint nf = spobj->n_fields(); 458 if (nf > 0) { 459 uint first_ind = spobj->first_index(); 460 Node* fld_node = mcall->in(first_ind); 461 ciField* cifield; 462 if (iklass != NULL) { 463 st->print(" ["); 464 cifield = iklass->nonstatic_field_at(0); 465 cifield->print_name_on(st); 466 format_helper( regalloc, st, fld_node, ":", 0, &scobjs ); 467 } else { 468 format_helper( regalloc, st, fld_node, "[", 0, &scobjs ); 469 } 470 for (uint j = 1; j < nf; j++) { 471 fld_node = mcall->in(first_ind+j); 472 if (iklass != NULL) { 473 st->print(", ["); 474 cifield = iklass->nonstatic_field_at(j); 475 cifield->print_name_on(st); 476 format_helper( regalloc, st, fld_node, ":", j, &scobjs ); 477 } else { 478 format_helper( regalloc, st, fld_node, ", [", j, &scobjs ); 479 } 480 } 481 } 482 st->print(" }"); 483 } 484 } 485 st->print_cr(""); 486 if (caller() != NULL) caller()->format(regalloc, n, st); 487 } 488 489 490 void JVMState::dump_spec(outputStream *st) const { 491 if (_method != NULL) { 492 bool printed = false; 493 if (!Verbose) { 494 // The JVMS dumps make really, really long lines. 495 // Take out the most boring parts, which are the package prefixes. 496 char buf[500]; 497 stringStream namest(buf, sizeof(buf)); 498 _method->print_short_name(&namest); 499 if (namest.count() < sizeof(buf)) { 500 const char* name = namest.base(); 501 if (name[0] == ' ') ++name; 502 const char* endcn = strchr(name, ':'); // end of class name 503 if (endcn == NULL) endcn = strchr(name, '('); 504 if (endcn == NULL) endcn = name + strlen(name); 505 while (endcn > name && endcn[-1] != '.' 
&& endcn[-1] != '/') 506 --endcn; 507 st->print(" %s", endcn); 508 printed = true; 509 } 510 } 511 if (!printed) 512 _method->print_short_name(st); 513 st->print(" @ bci:%d",_bci); 514 if(_reexecute == Reexecute_True) 515 st->print(" reexecute"); 516 } else { 517 st->print(" runtime stub"); 518 } 519 if (caller() != NULL) caller()->dump_spec(st); 520 } 521 522 523 void JVMState::dump_on(outputStream* st) const { 524 if (_map && !((uintptr_t)_map & 1)) { 525 if (_map->len() > _map->req()) { // _map->has_exceptions() 526 Node* ex = _map->in(_map->req()); // _map->next_exception() 527 // skip the first one; it's already being printed 528 while (ex != NULL && ex->len() > ex->req()) { 529 ex = ex->in(ex->req()); // ex->next_exception() 530 ex->dump(1); 531 } 532 } 533 _map->dump(2); 534 } 535 st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=", 536 depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false"); 537 if (_method == NULL) { 538 st->print_cr("(none)"); 539 } else { 540 _method->print_name(st); 541 st->cr(); 542 if (bci() >= 0 && bci() < _method->code_size()) { 543 st->print(" bc: "); 544 _method->print_codes_on(bci(), bci()+1, st); 545 } 546 } 547 if (caller() != NULL) { 548 caller()->dump_on(st); 549 } 550 } 551 552 // Extra way to dump a jvms from the debugger, 553 // to avoid a bug with C++ member function calls. 554 void dump_jvms(JVMState* jvms) { 555 jvms->dump(); 556 } 557 #endif 558 559 //--------------------------clone_shallow-------------------------------------- 560 JVMState* JVMState::clone_shallow(Compile* C) const { 561 JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0); 562 n->set_bci(_bci); 563 n->_reexecute = _reexecute; 564 n->set_locoff(_locoff); 565 n->set_stkoff(_stkoff); 566 n->set_monoff(_monoff); 567 n->set_scloff(_scloff); 568 n->set_endoff(_endoff); 569 n->set_sp(_sp); 570 n->set_map(_map); 571 return n; 572 } 573 574 //---------------------------clone_deep---------------------------------------- 575 JVMState* JVMState::clone_deep(Compile* C) const { 576 JVMState* n = clone_shallow(C); 577 for (JVMState* p = n; p->_caller != NULL; p = p->_caller) { 578 p->_caller = p->_caller->clone_shallow(C); 579 } 580 assert(n->depth() == depth(), "sanity"); 581 assert(n->debug_depth() == debug_depth(), "sanity"); 582 return n; 583 } 584 585 //============================================================================= 586 uint CallNode::cmp( const Node &n ) const 587 { return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; } 588 #ifndef PRODUCT 589 void CallNode::dump_req() const { 590 // Dump the required inputs, enclosed in '(' and ')' 591 uint i; // Exit value of loop 592 for( i=0; i<req(); i++ ) { // For all required inputs 593 if( i == TypeFunc::Parms ) tty->print("("); 594 if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? 
' ' : 'o', in(i)->_idx); 595 else tty->print("_ "); 596 } 597 tty->print(")"); 598 } 599 600 void CallNode::dump_spec(outputStream *st) const { 601 st->print(" "); 602 tf()->dump_on(st); 603 if (_cnt != COUNT_UNKNOWN) st->print(" C=%f",_cnt); 604 if (jvms() != NULL) jvms()->dump_spec(st); 605 } 606 #endif 607 608 const Type *CallNode::bottom_type() const { return tf()->range(); } 609 const Type *CallNode::Value(PhaseTransform *phase) const { 610 if (phase->type(in(0)) == Type::TOP) return Type::TOP; 611 return tf()->range(); 612 } 613 614 //------------------------------calling_convention----------------------------- 615 void CallNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const { 616 // Use the standard compiler calling convention 617 Matcher::calling_convention( sig_bt, parm_regs, argcnt, true ); 618 } 619 620 621 //------------------------------match------------------------------------------ 622 // Construct projections for control, I/O, memory-fields, ..., and 623 // return result(s) along with their RegMask info 624 Node *CallNode::match( const ProjNode *proj, const Matcher *match ) { 625 switch (proj->_con) { 626 case TypeFunc::Control: 627 case TypeFunc::I_O: 628 case TypeFunc::Memory: 629 return new (match->C) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj); 630 631 case TypeFunc::Parms+1: // For LONG & DOUBLE returns 632 assert(tf()->_range->field_at(TypeFunc::Parms+1) == Type::HALF, ""); 633 // 2nd half of doubles and longs 634 return new (match->C) MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad); 635 636 case TypeFunc::Parms: { // Normal returns 637 uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg(); 638 OptoRegPair regs = is_CallRuntime() 639 ? match->c_return_value(ideal_reg,true) // Calls into C runtime 640 : match-> return_value(ideal_reg,true); // Calls into compiled Java code 641 RegMask rm = RegMask(regs.first()); 642 if( OptoReg::is_valid(regs.second()) ) 643 rm.Insert( regs.second() ); 644 return new (match->C) MachProjNode(this,proj->_con,rm,ideal_reg); 645 } 646 647 case TypeFunc::ReturnAdr: 648 case TypeFunc::FramePtr: 649 default: 650 ShouldNotReachHere(); 651 } 652 return NULL; 653 } 654 655 // Do we Match on this edge index or not? Match no edges 656 uint CallNode::match_edge(uint idx) const { 657 return 0; 658 } 659 660 // 661 // Determine whether the call could modify the field of the specified 662 // instance at the specified offset. 663 // 664 bool CallNode::may_modify(const TypePtr *addr_t, PhaseTransform *phase) { 665 const TypeOopPtr *adrInst_t = addr_t->isa_oopptr(); 666 667 // If not an OopPtr or not an instance type, assume the worst. 668 // Note: currently this method is called only for instance types. 669 if (adrInst_t == NULL || !adrInst_t->is_known_instance()) { 670 return true; 671 } 672 // The instance_id is set only for scalar-replaceable allocations which 673 // are not passed as arguments according to Escape Analysis. 674 return false; 675 } 676 677 // Does this call have a direct reference to n other than debug information? 678 bool CallNode::has_non_debug_use(Node *n) { 679 const TypeTuple * d = tf()->domain(); 680 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 681 Node *arg = in(i); 682 if (arg == n) { 683 return true; 684 } 685 } 686 return false; 687 } 688 689 // Returns the unique CheckCastPP of a call 690 // or 'this' if there are several CheckCastPP 691 // or returns NULL if there is no one. 
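//
// A caller has to distinguish all three answers; a minimal, purely
// illustrative usage sketch (not code taken from this file):
//
//   Node* cast = call->result_cast();
//   if (cast == NULL) {
//     // no CheckCastPP user of the result projection
//   } else if (cast == call) {
//     // several CheckCastPP users, no unique cast to rely on
//   } else {
//     // 'cast' is the single CheckCastPP attached to the result
//   }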
Node *CallNode::result_cast() {
  Node *cast = NULL;

  Node *p = proj_out(TypeFunc::Parms);
  if (p == NULL)
    return NULL;

  for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
    Node *use = p->fast_out(i);
    if (use->is_CheckCastPP()) {
      if (cast != NULL) {
        return this;  // more than 1 CheckCastPP
      }
      cast = use;
    }
  }
  return cast;
}


void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj) {
  projs->fallthrough_proj      = NULL;
  projs->fallthrough_catchproj = NULL;
  projs->fallthrough_ioproj    = NULL;
  projs->catchall_ioproj       = NULL;
  projs->catchall_catchproj    = NULL;
  projs->fallthrough_memproj   = NULL;
  projs->catchall_memproj      = NULL;
  projs->resproj               = NULL;
  projs->exobj                 = NULL;

  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    ProjNode *pn = fast_out(i)->as_Proj();
    if (pn->outcnt() == 0) continue;
    switch (pn->_con) {
    case TypeFunc::Control:
    {
      // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
      projs->fallthrough_proj = pn;
      DUIterator_Fast jmax, j = pn->fast_outs(jmax);
      const Node *cn = pn->fast_out(j);
      if (cn->is_Catch()) {
        ProjNode *cpn = NULL;
        for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
          cpn = cn->fast_out(k)->as_Proj();
          assert(cpn->is_CatchProj(), "must be a CatchProjNode");
          if (cpn->_con == CatchProjNode::fall_through_index)
            projs->fallthrough_catchproj = cpn;
          else {
            assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
            projs->catchall_catchproj = cpn;
          }
        }
      }
      break;
    }
    case TypeFunc::I_O:
      if (pn->_is_io_use)
        projs->catchall_ioproj = pn;
      else
        projs->fallthrough_ioproj = pn;
      for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
        Node* e = pn->out(j);
        if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
          assert(projs->exobj == NULL, "only one");
          projs->exobj = e;
        }
      }
      break;
    case TypeFunc::Memory:
      if (pn->_is_io_use)
        projs->catchall_memproj = pn;
      else
        projs->fallthrough_memproj = pn;
      break;
    case TypeFunc::Parms:
      projs->resproj = pn;
      break;
    default:
      assert(false, "unexpected projection from allocation node.");
    }
  }

  // The resproj may not exist because the result could be ignored
  // and the exception object may not exist if an exception handler
  // swallows the exception but all the others must exist and be found.
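  //
  // Callers therefore have to be prepared for a NULL resproj or exobj.
  // A hedged usage sketch (illustrative only, not taken from a real caller):
  //
  //   CallProjections projs;
  //   call->extract_projections(&projs, true /*separate_io_proj*/);
  //   if (projs.resproj != NULL) {
  //     // the call's result is actually consumed somewhere
  //   }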
778 assert(projs->fallthrough_proj != NULL, "must be found"); 779 assert(Compile::current()->inlining_incrementally() || projs->fallthrough_catchproj != NULL, "must be found"); 780 assert(Compile::current()->inlining_incrementally() || projs->fallthrough_memproj != NULL, "must be found"); 781 assert(Compile::current()->inlining_incrementally() || projs->fallthrough_ioproj != NULL, "must be found"); 782 assert(Compile::current()->inlining_incrementally() || projs->catchall_catchproj != NULL, "must be found"); 783 if (separate_io_proj) { 784 assert(Compile::current()->inlining_incrementally() || projs->catchall_memproj != NULL, "must be found"); 785 assert(Compile::current()->inlining_incrementally() || projs->catchall_ioproj != NULL, "must be found"); 786 } 787 } 788 789 Node *CallNode::Ideal(PhaseGVN *phase, bool can_reshape) { 790 if (can_reshape && _cg != NULL && _cg->is_mh_late_inline() && !_cg->already_attempted()) { 791 // Check whether this MH handle call becomes a candidate for inlining 792 ciMethod* callee = _cg->method(); 793 vmIntrinsics::ID iid = callee->intrinsic_id(); 794 if (iid == vmIntrinsics::_invokeBasic) { 795 if (in(TypeFunc::Parms)->Opcode() == Op_ConP) { 796 Compile::current()->prepend_late_inline(_cg); 797 _cg = NULL; 798 } 799 } else { 800 if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) { 801 Compile::current()->prepend_late_inline(_cg); 802 _cg = NULL; 803 } 804 } 805 } 806 return SafePointNode::Ideal(phase, can_reshape); 807 } 808 809 810 //============================================================================= 811 uint CallJavaNode::size_of() const { return sizeof(*this); } 812 uint CallJavaNode::cmp( const Node &n ) const { 813 CallJavaNode &call = (CallJavaNode&)n; 814 return CallNode::cmp(call) && _method == call._method; 815 } 816 #ifndef PRODUCT 817 void CallJavaNode::dump_spec(outputStream *st) const { 818 if( _method ) _method->print_short_name(st); 819 CallNode::dump_spec(st); 820 } 821 #endif 822 823 //============================================================================= 824 uint CallStaticJavaNode::size_of() const { return sizeof(*this); } 825 uint CallStaticJavaNode::cmp( const Node &n ) const { 826 CallStaticJavaNode &call = (CallStaticJavaNode&)n; 827 return CallJavaNode::cmp(call); 828 } 829 830 //----------------------------uncommon_trap_request---------------------------- 831 // If this is an uncommon trap, return the request code, else zero. 
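// The request code is the constant integer argument that the graph passes to
// the "uncommon_trap" stub; it packs a Deoptimization::DeoptReason and a
// Deoptimization::DeoptAction.  A hedged decoding sketch (assuming the usual
// Deoptimization helpers, not code from this file):
//
//   int trap_req = call->uncommon_trap_request();
//   if (trap_req != 0) {
//     Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_req);
//     Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_req);
//     // reason/action describe why and how the nmethod should deoptimize
//   }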
832 int CallStaticJavaNode::uncommon_trap_request() const { 833 if (_name != NULL && !strcmp(_name, "uncommon_trap")) { 834 return extract_uncommon_trap_request(this); 835 } 836 return 0; 837 } 838 int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) { 839 #ifndef PRODUCT 840 if (!(call->req() > TypeFunc::Parms && 841 call->in(TypeFunc::Parms) != NULL && 842 call->in(TypeFunc::Parms)->is_Con())) { 843 assert(_in_dump_cnt != 0, "OK if dumping"); 844 tty->print("[bad uncommon trap]"); 845 return 0; 846 } 847 #endif 848 return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con(); 849 } 850 851 #ifndef PRODUCT 852 void CallStaticJavaNode::dump_spec(outputStream *st) const { 853 st->print("# Static "); 854 if (_name != NULL) { 855 st->print("%s", _name); 856 int trap_req = uncommon_trap_request(); 857 if (trap_req != 0) { 858 char buf[100]; 859 st->print("(%s)", 860 Deoptimization::format_trap_request(buf, sizeof(buf), 861 trap_req)); 862 } 863 st->print(" "); 864 } 865 CallJavaNode::dump_spec(st); 866 } 867 #endif 868 869 //============================================================================= 870 uint CallDynamicJavaNode::size_of() const { return sizeof(*this); } 871 uint CallDynamicJavaNode::cmp( const Node &n ) const { 872 CallDynamicJavaNode &call = (CallDynamicJavaNode&)n; 873 return CallJavaNode::cmp(call); 874 } 875 #ifndef PRODUCT 876 void CallDynamicJavaNode::dump_spec(outputStream *st) const { 877 st->print("# Dynamic "); 878 CallJavaNode::dump_spec(st); 879 } 880 #endif 881 882 //============================================================================= 883 uint CallRuntimeNode::size_of() const { return sizeof(*this); } 884 uint CallRuntimeNode::cmp( const Node &n ) const { 885 CallRuntimeNode &call = (CallRuntimeNode&)n; 886 return CallNode::cmp(call) && !strcmp(_name,call._name); 887 } 888 #ifndef PRODUCT 889 void CallRuntimeNode::dump_spec(outputStream *st) const { 890 st->print("# "); 891 st->print(_name); 892 CallNode::dump_spec(st); 893 } 894 #endif 895 896 //------------------------------calling_convention----------------------------- 897 void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const { 898 Matcher::c_calling_convention( sig_bt, parm_regs, argcnt ); 899 } 900 901 //============================================================================= 902 //------------------------------calling_convention----------------------------- 903 904 905 //============================================================================= 906 #ifndef PRODUCT 907 void CallLeafNode::dump_spec(outputStream *st) const { 908 st->print("# "); 909 st->print(_name); 910 CallNode::dump_spec(st); 911 } 912 #endif 913 914 //============================================================================= 915 916 void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) { 917 assert(verify_jvms(jvms), "jvms must match"); 918 int loc = jvms->locoff() + idx; 919 if (in(loc)->is_top() && idx > 0 && !c->is_top() ) { 920 // If current local idx is top then local idx - 1 could 921 // be a long/double that needs to be killed since top could 922 // represent the 2nd half ofthe long/double. 
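    // For example, if locals {2,3} currently hold a long (the long value is
    // at index 2 and index 3 is top), then storing a new value into local 3
    // must also kill the long at index 2.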
923 uint ideal = in(loc -1)->ideal_reg(); 924 if (ideal == Op_RegD || ideal == Op_RegL) { 925 // set other (low index) half to top 926 set_req(loc - 1, in(loc)); 927 } 928 } 929 set_req(loc, c); 930 } 931 932 uint SafePointNode::size_of() const { return sizeof(*this); } 933 uint SafePointNode::cmp( const Node &n ) const { 934 return (&n == this); // Always fail except on self 935 } 936 937 //-------------------------set_next_exception---------------------------------- 938 void SafePointNode::set_next_exception(SafePointNode* n) { 939 assert(n == NULL || n->Opcode() == Op_SafePoint, "correct value for next_exception"); 940 if (len() == req()) { 941 if (n != NULL) add_prec(n); 942 } else { 943 set_prec(req(), n); 944 } 945 } 946 947 948 //----------------------------next_exception----------------------------------- 949 SafePointNode* SafePointNode::next_exception() const { 950 if (len() == req()) { 951 return NULL; 952 } else { 953 Node* n = in(req()); 954 assert(n == NULL || n->Opcode() == Op_SafePoint, "no other uses of prec edges"); 955 return (SafePointNode*) n; 956 } 957 } 958 959 960 //------------------------------Ideal------------------------------------------ 961 // Skip over any collapsed Regions 962 Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) { 963 return remove_dead_region(phase, can_reshape) ? this : NULL; 964 } 965 966 //------------------------------Identity--------------------------------------- 967 // Remove obviously duplicate safepoints 968 Node *SafePointNode::Identity( PhaseTransform *phase ) { 969 970 // If you have back to back safepoints, remove one 971 if( in(TypeFunc::Control)->is_SafePoint() ) 972 return in(TypeFunc::Control); 973 974 if( in(0)->is_Proj() ) { 975 Node *n0 = in(0)->in(0); 976 // Check if he is a call projection (except Leaf Call) 977 if( n0->is_Catch() ) { 978 n0 = n0->in(0)->in(0); 979 assert( n0->is_Call(), "expect a call here" ); 980 } 981 if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) { 982 // Useless Safepoint, so remove it 983 return in(TypeFunc::Control); 984 } 985 } 986 987 return this; 988 } 989 990 //------------------------------Value------------------------------------------ 991 const Type *SafePointNode::Value( PhaseTransform *phase ) const { 992 if( phase->type(in(0)) == Type::TOP ) return Type::TOP; 993 if( phase->eqv( in(0), this ) ) return Type::TOP; // Dead infinite loop 994 return Type::CONTROL; 995 } 996 997 #ifndef PRODUCT 998 void SafePointNode::dump_spec(outputStream *st) const { 999 st->print(" SafePoint "); 1000 } 1001 #endif 1002 1003 const RegMask &SafePointNode::in_RegMask(uint idx) const { 1004 if( idx < TypeFunc::Parms ) return RegMask::Empty; 1005 // Values outside the domain represent debug info 1006 return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]); 1007 } 1008 const RegMask &SafePointNode::out_RegMask() const { 1009 return RegMask::Empty; 1010 } 1011 1012 1013 void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) { 1014 assert((int)grow_by > 0, "sanity"); 1015 int monoff = jvms->monoff(); 1016 int scloff = jvms->scloff(); 1017 int endoff = jvms->endoff(); 1018 assert(endoff == (int)req(), "no other states or debug info after me"); 1019 Node* top = Compile::current()->top(); 1020 for (uint i = 0; i < grow_by; i++) { 1021 ins_req(monoff, top); 1022 } 1023 jvms->set_monoff(monoff + grow_by); 1024 jvms->set_scloff(scloff + grow_by); 1025 jvms->set_endoff(endoff + grow_by); 1026 } 1027 1028 void SafePointNode::push_monitor(const FastLockNode *lock) { 
  // Add a LockNode, which points to both the original BoxLockNode (the
  // stack space for the monitor) and the Object being locked.
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  assert(req() == jvms()->endoff(), "correct sizing");
  int nextmon = jvms()->scloff();
  if (GenerateSynchronizationCode) {
    add_req(lock->box_node());
    add_req(lock->obj_node());
  } else {
    Node* top = Compile::current()->top();
    add_req(top);
    add_req(top);
  }
  jvms()->set_scloff(nextmon+MonitorEdges);
  jvms()->set_endoff(req());
}

void SafePointNode::pop_monitor() {
  // Delete last monitor from debug info
  debug_only(int num_before_pop = jvms()->nof_monitors());
  const int MonitorEdges = (1<<JVMState::logMonitorEdges);
  int scloff = jvms()->scloff();
  int endoff = jvms()->endoff();
  int new_scloff = scloff - MonitorEdges;
  int new_endoff = endoff - MonitorEdges;
  jvms()->set_scloff(new_scloff);
  jvms()->set_endoff(new_endoff);
  while (scloff > new_scloff) del_req(--scloff);
  assert(jvms()->nof_monitors() == num_before_pop-1, "");
}

Node *SafePointNode::peek_monitor_box() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_box(jvms(), mon);
}

Node *SafePointNode::peek_monitor_obj() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_obj(jvms(), mon);
}

// Do we Match on this edge index or not? Match no edges
uint SafePointNode::match_edge(uint idx) const {
  if( !needs_polling_address_input() )
    return 0;

  return (TypeFunc::Parms == idx);
}

//============== SafePointScalarObjectNode ==============

SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                                                     AllocateNode* alloc,
#endif
                                                     uint first_index,
                                                     uint n_fields) :
  TypeNode(tp, 1), // 1 control input -- seems required. Get from root.
#ifdef ASSERT
  _alloc(alloc),
#endif
  _first_index(first_index),
  _n_fields(n_fields)
{
  init_class_id(Class_SafePointScalarObject);
}

// Do not allow value-numbering for SafePointScalarObject node.
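// Each SafePointScalarObjectNode describes one scalar-replaced object in the
// debug info of one particular safepoint (its _first_index is an input
// position on that safepoint), so two structurally identical nodes must
// never be commoned up by GVN.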
1100 uint SafePointScalarObjectNode::hash() const { return NO_HASH; } 1101 uint SafePointScalarObjectNode::cmp( const Node &n ) const { 1102 return (&n == this); // Always fail except on self 1103 } 1104 1105 uint SafePointScalarObjectNode::ideal_reg() const { 1106 return 0; // No matching to machine instruction 1107 } 1108 1109 const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const { 1110 return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]); 1111 } 1112 1113 const RegMask &SafePointScalarObjectNode::out_RegMask() const { 1114 return RegMask::Empty; 1115 } 1116 1117 uint SafePointScalarObjectNode::match_edge(uint idx) const { 1118 return 0; 1119 } 1120 1121 SafePointScalarObjectNode* 1122 SafePointScalarObjectNode::clone(int jvms_adj, Dict* sosn_map) const { 1123 void* cached = (*sosn_map)[(void*)this]; 1124 if (cached != NULL) { 1125 return (SafePointScalarObjectNode*)cached; 1126 } 1127 SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone(); 1128 res->_first_index += jvms_adj; 1129 sosn_map->Insert((void*)this, (void*)res); 1130 return res; 1131 } 1132 1133 1134 #ifndef PRODUCT 1135 void SafePointScalarObjectNode::dump_spec(outputStream *st) const { 1136 st->print(" # fields@[%d..%d]", first_index(), 1137 first_index() + n_fields() - 1); 1138 } 1139 1140 #endif 1141 1142 //============================================================================= 1143 uint AllocateNode::size_of() const { return sizeof(*this); } 1144 1145 AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype, 1146 Node *ctrl, Node *mem, Node *abio, 1147 Node *size, Node *klass_node, Node *initial_test) 1148 : CallNode(atype, NULL, TypeRawPtr::BOTTOM) 1149 { 1150 init_class_id(Class_Allocate); 1151 init_flags(Flag_is_macro); 1152 _is_scalar_replaceable = false; 1153 Node *topnode = C->top(); 1154 1155 init_req( TypeFunc::Control , ctrl ); 1156 init_req( TypeFunc::I_O , abio ); 1157 init_req( TypeFunc::Memory , mem ); 1158 init_req( TypeFunc::ReturnAdr, topnode ); 1159 init_req( TypeFunc::FramePtr , topnode ); 1160 init_req( AllocSize , size); 1161 init_req( KlassNode , klass_node); 1162 init_req( InitialTest , initial_test); 1163 init_req( ALength , topnode); 1164 C->add_macro_node(this); 1165 } 1166 1167 //============================================================================= 1168 uint AllocateArrayNode::size_of() const { return sizeof(*this); } 1169 1170 Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) { 1171 if (remove_dead_region(phase, can_reshape)) return this; 1172 // Don't bother trying to transform a dead node 1173 if (in(0) && in(0)->is_top()) return NULL; 1174 1175 const Type* type = phase->type(Ideal_length()); 1176 if (type->isa_int() && type->is_int()->_hi < 0) { 1177 if (can_reshape) { 1178 PhaseIterGVN *igvn = phase->is_IterGVN(); 1179 // Unreachable fall through path (negative array length), 1180 // the allocation can only throw so disconnect it. 
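      // (This corresponds to an allocation such as "new int[n]" where type
      // analysis has proven n < 0: the only possible outcome is a
      // NegativeArraySizeException, so the normal control projection is
      // rerouted to a Halt node below.)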
1181 Node* proj = proj_out(TypeFunc::Control); 1182 Node* catchproj = NULL; 1183 if (proj != NULL) { 1184 for (DUIterator_Fast imax, i = proj->fast_outs(imax); i < imax; i++) { 1185 Node *cn = proj->fast_out(i); 1186 if (cn->is_Catch()) { 1187 catchproj = cn->as_Multi()->proj_out(CatchProjNode::fall_through_index); 1188 break; 1189 } 1190 } 1191 } 1192 if (catchproj != NULL && catchproj->outcnt() > 0 && 1193 (catchproj->outcnt() > 1 || 1194 catchproj->unique_out()->Opcode() != Op_Halt)) { 1195 assert(catchproj->is_CatchProj(), "must be a CatchProjNode"); 1196 Node* nproj = catchproj->clone(); 1197 igvn->register_new_node_with_optimizer(nproj); 1198 1199 Node *frame = new (phase->C) ParmNode( phase->C->start(), TypeFunc::FramePtr ); 1200 frame = phase->transform(frame); 1201 // Halt & Catch Fire 1202 Node *halt = new (phase->C) HaltNode( nproj, frame ); 1203 phase->C->root()->add_req(halt); 1204 phase->transform(halt); 1205 1206 igvn->replace_node(catchproj, phase->C->top()); 1207 return this; 1208 } 1209 } else { 1210 // Can't correct it during regular GVN so register for IGVN 1211 phase->C->record_for_igvn(this); 1212 } 1213 } 1214 return NULL; 1215 } 1216 1217 // Retrieve the length from the AllocateArrayNode. Narrow the type with a 1218 // CastII, if appropriate. If we are not allowed to create new nodes, and 1219 // a CastII is appropriate, return NULL. 1220 Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTransform *phase, bool allow_new_nodes) { 1221 Node *length = in(AllocateNode::ALength); 1222 assert(length != NULL, "length is not null"); 1223 1224 const TypeInt* length_type = phase->find_int_type(length); 1225 const TypeAryPtr* ary_type = oop_type->isa_aryptr(); 1226 1227 if (ary_type != NULL && length_type != NULL) { 1228 const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type); 1229 if (narrow_length_type != length_type) { 1230 // Assert one of: 1231 // - the narrow_length is 0 1232 // - the narrow_length is not wider than length 1233 assert(narrow_length_type == TypeInt::ZERO || 1234 (narrow_length_type->_hi <= length_type->_hi && 1235 narrow_length_type->_lo >= length_type->_lo), 1236 "narrow type must be narrower than length type"); 1237 1238 // Return NULL if new nodes are not allowed 1239 if (!allow_new_nodes) return NULL; 1240 // Create a cast which is control dependent on the initialization to 1241 // propagate the fact that the array length must be positive. 1242 length = new (phase->C) CastIINode(length, narrow_length_type); 1243 length->set_req(0, initialization()->proj_out(0)); 1244 } 1245 } 1246 1247 return length; 1248 } 1249 1250 //============================================================================= 1251 uint LockNode::size_of() const { return sizeof(*this); } 1252 1253 // Redundant lock elimination 1254 // 1255 // There are various patterns of locking where we release and 1256 // immediately reacquire a lock in a piece of code where no operations 1257 // occur in between that would be observable. In those cases we can 1258 // skip releasing and reacquiring the lock without violating any 1259 // fairness requirements. Doing this around a loop could cause a lock 1260 // to be held for a very long time so we concentrate on non-looping 1261 // control flow. We also require that the operations are fully 1262 // redundant meaning that we don't introduce new lock operations on 1263 // some paths so to be able to eliminate it on others ala PRE. 
// This would probably require some more extensive graph manipulation to
// guarantee that the memory edges were all handled correctly.
//
// Assuming p is a simple predicate which can't trap in any way and s
// is a synchronized method, consider this code:
//
//   s();
//   if (p)
//     s();
//   else
//     s();
//   s();
//
// 1. The unlocks of the first call to s can be eliminated if the
//    locks inside the then and else branches are eliminated.
//
// 2. The unlocks of the then and else branches can be eliminated if
//    the lock of the final call to s is eliminated.
//
// Either of these cases subsumes the simple case of sequential control flow.
//
// Additionally we can eliminate versions without the else case:
//
//   s();
//   if (p)
//     s();
//   s();
//
// 3. In this case we eliminate the unlock of the first s, the lock
//    and unlock in the then case and the lock in the final s.
//
// Note also that in all these cases the then/else pieces don't have
// to be trivial as long as they begin and end with synchronization
// operations.
//
//   s();
//   if (p)
//     s();
//     f();
//     s();
//   s();
//
// The code will work properly for this case, leaving in the unlock
// before the call to f and the relock after it.
//
// A potentially interesting case which isn't handled here is when the
// locking is partially redundant.
//
//   s();
//   if (p)
//     s();
//
// This could be eliminated by putting unlocking on the else case and
// eliminating the first unlock and the lock in the then side.
// Alternatively the unlock could be moved out of the then side so it
// was after the merge and the first unlock and second lock
// eliminated.  This might require less manipulation of the memory
// state to get correct.
//
// Additionally we might allow work between an unlock and a lock before
// giving up on eliminating the locks.  The current code disallows any
// conditional control flow between these operations.  A formulation
// similar to partial redundancy elimination computing the
// availability of unlocking and the anticipatability of locking at a
// program point would allow detection of fully redundant locking with
// some amount of work in between.  I'm not sure how often I really
// think that would occur though.  Most of the cases I've seen
// indicate it's likely non-trivial work would occur in between.
// There may be other more complicated constructs where we could
// eliminate locking but I haven't seen any others appear as hot or
// interesting.
//
// Locking and unlocking have a canonical form in ideal that looks
// roughly like this:
//
//       <obj>
//       |  \\------+
//       |   \       \
//       |    BoxLock  \
//       |    |  |      \
//       |    |   \      \
//       |    |    FastLock
//       |    |    /
//       |    |   /
//       |    |  |
//
//       Lock
//       |
//       Proj #0
//       |
//       MembarAcquire
//       |
//       Proj #0
//
//       MembarRelease
//       |
//       Proj #0
//       |
//       Unlock
//       |
//       Proj #0
//
//
// This code proceeds by processing Lock nodes during PhaseIterGVN
// and searching back through their control for the proper code
// patterns.
Once it finds a set of lock and unlock operations to 1370 // eliminate they are marked as eliminatable which causes the 1371 // expansion of the Lock and Unlock macro nodes to make the operation a NOP 1372 // 1373 //============================================================================= 1374 1375 // 1376 // Utility function to skip over uninteresting control nodes. Nodes skipped are: 1377 // - copy regions. (These may not have been optimized away yet.) 1378 // - eliminated locking nodes 1379 // 1380 static Node *next_control(Node *ctrl) { 1381 if (ctrl == NULL) 1382 return NULL; 1383 while (1) { 1384 if (ctrl->is_Region()) { 1385 RegionNode *r = ctrl->as_Region(); 1386 Node *n = r->is_copy(); 1387 if (n == NULL) 1388 break; // hit a region, return it 1389 else 1390 ctrl = n; 1391 } else if (ctrl->is_Proj()) { 1392 Node *in0 = ctrl->in(0); 1393 if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) { 1394 ctrl = in0->in(0); 1395 } else { 1396 break; 1397 } 1398 } else { 1399 break; // found an interesting control 1400 } 1401 } 1402 return ctrl; 1403 } 1404 // 1405 // Given a control, see if it's the control projection of an Unlock which 1406 // operating on the same object as lock. 1407 // 1408 bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock, 1409 GrowableArray<AbstractLockNode*> &lock_ops) { 1410 ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : NULL; 1411 if (ctrl_proj != NULL && ctrl_proj->_con == TypeFunc::Control) { 1412 Node *n = ctrl_proj->in(0); 1413 if (n != NULL && n->is_Unlock()) { 1414 UnlockNode *unlock = n->as_Unlock(); 1415 if (lock->obj_node()->eqv_uncast(unlock->obj_node()) && 1416 BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) && 1417 !unlock->is_eliminated()) { 1418 lock_ops.append(unlock); 1419 return true; 1420 } 1421 } 1422 } 1423 return false; 1424 } 1425 1426 // 1427 // Find the lock matching an unlock. Returns null if a safepoint 1428 // or complicated control is encountered first. 1429 LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) { 1430 LockNode *lock_result = NULL; 1431 // find the matching lock, or an intervening safepoint 1432 Node *ctrl = next_control(unlock->in(0)); 1433 while (1) { 1434 assert(ctrl != NULL, "invalid control graph"); 1435 assert(!ctrl->is_Start(), "missing lock for unlock"); 1436 if (ctrl->is_top()) break; // dead control path 1437 if (ctrl->is_Proj()) ctrl = ctrl->in(0); 1438 if (ctrl->is_SafePoint()) { 1439 break; // found a safepoint (may be the lock we are searching for) 1440 } else if (ctrl->is_Region()) { 1441 // Check for a simple diamond pattern. Punt on anything more complicated 1442 if (ctrl->req() == 3 && ctrl->in(1) != NULL && ctrl->in(2) != NULL) { 1443 Node *in1 = next_control(ctrl->in(1)); 1444 Node *in2 = next_control(ctrl->in(2)); 1445 if (((in1->is_IfTrue() && in2->is_IfFalse()) || 1446 (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) { 1447 ctrl = next_control(in1->in(0)->in(0)); 1448 } else { 1449 break; 1450 } 1451 } else { 1452 break; 1453 } 1454 } else { 1455 ctrl = next_control(ctrl->in(0)); // keep searching 1456 } 1457 } 1458 if (ctrl->is_Lock()) { 1459 LockNode *lock = ctrl->as_Lock(); 1460 if (lock->obj_node()->eqv_uncast(unlock->obj_node()) && 1461 BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) { 1462 lock_result = lock; 1463 } 1464 } 1465 return lock_result; 1466 } 1467 1468 // This code corresponds to case 3 above. 
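//
// i.e. the if-without-else shape, where an unlock precedes the If and the
// then branch immediately re-locks the same object:
//
//   s();        // unlock of the first s()
//   if (p)
//     s();      // lock (and unlock) in the then branch
//   s();        // lock of the final s()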
1469 1470 bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock, 1471 GrowableArray<AbstractLockNode*> &lock_ops) { 1472 Node* if_node = node->in(0); 1473 bool if_true = node->is_IfTrue(); 1474 1475 if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) { 1476 Node *lock_ctrl = next_control(if_node->in(0)); 1477 if (find_matching_unlock(lock_ctrl, lock, lock_ops)) { 1478 Node* lock1_node = NULL; 1479 ProjNode* proj = if_node->as_If()->proj_out(!if_true); 1480 if (if_true) { 1481 if (proj->is_IfFalse() && proj->outcnt() == 1) { 1482 lock1_node = proj->unique_out(); 1483 } 1484 } else { 1485 if (proj->is_IfTrue() && proj->outcnt() == 1) { 1486 lock1_node = proj->unique_out(); 1487 } 1488 } 1489 if (lock1_node != NULL && lock1_node->is_Lock()) { 1490 LockNode *lock1 = lock1_node->as_Lock(); 1491 if (lock->obj_node()->eqv_uncast(lock1->obj_node()) && 1492 BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) && 1493 !lock1->is_eliminated()) { 1494 lock_ops.append(lock1); 1495 return true; 1496 } 1497 } 1498 } 1499 } 1500 1501 lock_ops.trunc_to(0); 1502 return false; 1503 } 1504 1505 bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock, 1506 GrowableArray<AbstractLockNode*> &lock_ops) { 1507 // check each control merging at this point for a matching unlock. 1508 // in(0) should be self edge so skip it. 1509 for (int i = 1; i < (int)region->req(); i++) { 1510 Node *in_node = next_control(region->in(i)); 1511 if (in_node != NULL) { 1512 if (find_matching_unlock(in_node, lock, lock_ops)) { 1513 // found a match so keep on checking. 1514 continue; 1515 } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) { 1516 continue; 1517 } 1518 1519 // If we fall through to here then it was some kind of node we 1520 // don't understand or there wasn't a matching unlock, so give 1521 // up trying to merge locks. 1522 lock_ops.trunc_to(0); 1523 return false; 1524 } 1525 } 1526 return true; 1527 1528 } 1529 1530 #ifndef PRODUCT 1531 // 1532 // Create a counter which counts the number of times this lock is acquired 1533 // 1534 void AbstractLockNode::create_lock_counter(JVMState* state) { 1535 _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter); 1536 } 1537 1538 void AbstractLockNode::set_eliminated_lock_counter() { 1539 if (_counter) { 1540 // Update the counter to indicate that this lock was eliminated. 1541 // The counter update code will stay around even though the 1542 // optimizer will eliminate the lock operation itself. 1543 _counter->set_tag(NamedCounter::EliminatedLockCounter); 1544 } 1545 } 1546 #endif 1547 1548 //============================================================================= 1549 Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) { 1550 1551 // perform any generic optimizations first (returns 'this' or NULL) 1552 Node *result = SafePointNode::Ideal(phase, can_reshape); 1553 if (result != NULL) return result; 1554 // Don't bother trying to transform a dead node 1555 if (in(0) && in(0)->is_top()) return NULL; 1556 1557 // Now see if we can optimize away this lock. We don't actually 1558 // remove the locking here, we simply set the _eliminate flag which 1559 // prevents macro expansion from expanding the lock. Since we don't 1560 // modify the graph, the value returned from this function is the 1561 // one computed above. 
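  //
  // Two independent conditions below can mark this lock eliminated: escape
  // analysis proving the locked object never globally escapes
  // (set_non_esc_obj), or lock coarsening finding adjacent unlock/lock pairs
  // on the same object and box (set_coarsened).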
1562 if (can_reshape && EliminateLocks && !is_non_esc_obj()) { 1563 // 1564 // If we are locking an unescaped object, the lock/unlock is unnecessary 1565 // 1566 ConnectionGraph *cgr = phase->C->congraph(); 1567 if (cgr != NULL && cgr->not_global_escape(obj_node())) { 1568 assert(!is_eliminated() || is_coarsened(), "sanity"); 1569 // The lock could be marked eliminated by lock coarsening 1570 // code during first IGVN before EA. Replace coarsened flag 1571 // to eliminate all associated locks/unlocks. 1572 this->set_non_esc_obj(); 1573 return result; 1574 } 1575 1576 // 1577 // Try lock coarsening 1578 // 1579 PhaseIterGVN* iter = phase->is_IterGVN(); 1580 if (iter != NULL && !is_eliminated()) { 1581 1582 GrowableArray<AbstractLockNode*> lock_ops; 1583 1584 Node *ctrl = next_control(in(0)); 1585 1586 // now search back for a matching Unlock 1587 if (find_matching_unlock(ctrl, this, lock_ops)) { 1588 // found an unlock directly preceding this lock. This is the 1589 // case of single unlock directly control dependent on a 1590 // single lock which is the trivial version of case 1 or 2. 1591 } else if (ctrl->is_Region() ) { 1592 if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) { 1593 // found lock preceded by multiple unlocks along all paths 1594 // joining at this point which is case 3 in description above. 1595 } 1596 } else { 1597 // see if this lock comes from either half of an if and the 1598 // predecessors merges unlocks and the other half of the if 1599 // performs a lock. 1600 if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) { 1601 // found unlock splitting to an if with locks on both branches. 1602 } 1603 } 1604 1605 if (lock_ops.length() > 0) { 1606 // add ourselves to the list of locks to be eliminated. 1607 lock_ops.append(this); 1608 1609 #ifndef PRODUCT 1610 if (PrintEliminateLocks) { 1611 int locks = 0; 1612 int unlocks = 0; 1613 for (int i = 0; i < lock_ops.length(); i++) { 1614 AbstractLockNode* lock = lock_ops.at(i); 1615 if (lock->Opcode() == Op_Lock) 1616 locks++; 1617 else 1618 unlocks++; 1619 if (Verbose) { 1620 lock->dump(1); 1621 } 1622 } 1623 tty->print_cr("***Eliminated %d unlocks and %d locks", unlocks, locks); 1624 } 1625 #endif 1626 1627 // for each of the identified locks, mark them 1628 // as eliminatable 1629 for (int i = 0; i < lock_ops.length(); i++) { 1630 AbstractLockNode* lock = lock_ops.at(i); 1631 1632 // Mark it eliminated by coarsening and update any counters 1633 lock->set_coarsened(); 1634 } 1635 } else if (ctrl->is_Region() && 1636 iter->_worklist.member(ctrl)) { 1637 // We weren't able to find any opportunities but the region this 1638 // lock is control dependent on hasn't been processed yet so put 1639 // this lock back on the worklist so we can check again once any 1640 // region simplification has occurred. 1641 iter->_worklist.push(this); 1642 } 1643 } 1644 } 1645 1646 return result; 1647 } 1648 1649 //============================================================================= 1650 bool LockNode::is_nested_lock_region() { 1651 BoxLockNode* box = box_node()->as_BoxLock(); 1652 int stk_slot = box->stack_slot(); 1653 if (stk_slot <= 0) 1654 return false; // External lock or it is not Box (Phi node). 1655 1656 // Ignore complex cases: merged locks or multiple locks. 1657 Node* obj = obj_node(); 1658 LockNode* unique_lock = NULL; 1659 if (!box->is_simple_lock_region(&unique_lock, obj) || 1660 (unique_lock != this)) { 1661 return false; 1662 } 1663 1664 // Look for external lock for the same object. 
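  // That is, walk every JVM state on this safepoint and check whether some
  // enclosing monitor locks the same object with a BoxLock in a lower stack
  // slot; if one exists, this lock is nested inside an outer lock region.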
1665 SafePointNode* sfn = this->as_SafePoint(); 1666 JVMState* youngest_jvms = sfn->jvms(); 1667 int max_depth = youngest_jvms->depth(); 1668 for (int depth = 1; depth <= max_depth; depth++) { 1669 JVMState* jvms = youngest_jvms->of_depth(depth); 1670 int num_mon = jvms->nof_monitors(); 1671 // Loop over monitors 1672 for (int idx = 0; idx < num_mon; idx++) { 1673 Node* obj_node = sfn->monitor_obj(jvms, idx); 1674 BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock(); 1675 if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) { 1676 return true; 1677 } 1678 } 1679 } 1680 return false; 1681 } 1682 1683 //============================================================================= 1684 uint UnlockNode::size_of() const { return sizeof(*this); } 1685 1686 //============================================================================= 1687 Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) { 1688 1689 // perform any generic optimizations first (returns 'this' or NULL) 1690 Node *result = SafePointNode::Ideal(phase, can_reshape); 1691 if (result != NULL) return result; 1692 // Don't bother trying to transform a dead node 1693 if (in(0) && in(0)->is_top()) return NULL; 1694 1695 // Now see if we can optimize away this unlock. We don't actually 1696 // remove the unlocking here, we simply set the _eliminate flag which 1697 // prevents macro expansion from expanding the unlock. Since we don't 1698 // modify the graph, the value returned from this function is the 1699 // one computed above. 1700 // Escape state is defined after Parse phase. 1701 if (can_reshape && EliminateLocks && !is_non_esc_obj()) { 1702 // 1703 // If we are unlocking an unescaped object, the lock/unlock is unnecessary. 1704 // 1705 ConnectionGraph *cgr = phase->C->congraph(); 1706 if (cgr != NULL && cgr->not_global_escape(obj_node())) { 1707 assert(!is_eliminated() || is_coarsened(), "sanity"); 1708 // The lock could be marked eliminated by lock coarsening 1709 // code during first IGVN before EA. Replace coarsened flag 1710 // to eliminate all associated locks/unlocks. 1711 this->set_non_esc_obj(); 1712 } 1713 } 1714 return result; 1715 }