/*
 * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

#include "incls/_precompiled.incl"
#include "incls/_callnode.cpp.incl"

//=============================================================================
uint StartNode::size_of() const { return sizeof(*this); }
uint StartNode::cmp( const Node &n ) const
{ return _domain == ((StartNode&)n)._domain; }
const Type *StartNode::bottom_type() const { return _domain; }
const Type *StartNode::Value(PhaseTransform *phase) const { return _domain; }
#ifndef PRODUCT
void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
#endif

//------------------------------Ideal------------------------------------------
Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

//------------------------------calling_convention-----------------------------
void StartNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  Matcher::calling_convention( sig_bt, parm_regs, argcnt, false );
}

//------------------------------Registers--------------------------------------
const RegMask &StartNode::in_RegMask(uint) const {
  return RegMask::Empty;
}

//------------------------------match------------------------------------------
// Construct projections for incoming parameters, and their RegMask info
Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new (match->C, 1) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  case TypeFunc::FramePtr:
    return new (match->C, 1) MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  case TypeFunc::ReturnAdr:
    return new (match->C, 1) MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  case TypeFunc::Parms:
  default: {
      uint parm_num = proj->_con - TypeFunc::Parms;
      const Type *t = _domain->field_at(proj->_con);
      if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
        return new (match->C, 1) ConNode(Type::TOP);
      uint ideal_reg = Matcher::base2reg[t->base()];
      RegMask &rm = match->_calling_convention_mask[parm_num];
      return new (match->C, 1) MachProjNode(this,proj->_con,rm,ideal_reg);
    }
  }
  return NULL;
}
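
// Illustrative note (signature assumed, not from this file): for a static
// method with Java signature (JI)V, the domain tuple carries the long at
// Parms+0, a Type::Half placeholder at Parms+1, and the int at Parms+2.
// match() therefore answers the Parms+1 projection with a TOP ConNode,
// while the long and the int get MachProjNodes masked by the calling
// convention.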

//------------------------------StartOSRNode----------------------------------
// The method start node for an on stack replacement adapter

//------------------------------osr_domain-----------------------------
const TypeTuple *StartOSRNode::osr_domain() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;  // address of osr buffer

  return TypeTuple::make(TypeFunc::Parms+1, fields);
}

//=============================================================================
const char * const ParmNode::names[TypeFunc::Parms+1] = {
  "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
};

#ifndef PRODUCT
void ParmNode::dump_spec(outputStream *st) const {
  if( _con < TypeFunc::Parms ) {
    st->print(names[_con]);
  } else {
    st->print("Parm%d: ",_con-TypeFunc::Parms);
    // Verbose and WizardMode dump bottom_type for all nodes
    if( !Verbose && !WizardMode )   bottom_type()->dump_on(st);
  }
}
#endif

uint ParmNode::ideal_reg() const {
  switch( _con ) {
  case TypeFunc::Control  : // fall through
  case TypeFunc::I_O      : // fall through
  case TypeFunc::Memory   : return 0;
  case TypeFunc::FramePtr : // fall through
  case TypeFunc::ReturnAdr: return Op_RegP;
  default                 : assert( _con > TypeFunc::Parms, "" );
    // fall through
  case TypeFunc::Parms    : {
    // Type of argument being passed
    const Type *t = in(0)->as_Start()->_domain->field_at(_con);
    return Matcher::base2reg[t->base()];
  }
  }
  ShouldNotReachHere();
  return 0;
}

//=============================================================================
ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) {
  init_req(TypeFunc::Control,cntrl);
  init_req(TypeFunc::I_O,i_o);
  init_req(TypeFunc::Memory,memory);
  init_req(TypeFunc::FramePtr,frameptr);
  init_req(TypeFunc::ReturnAdr,retadr);
}

Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

const Type *ReturnNode::Value( PhaseTransform *phase ) const {
  return ( phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

// Do we Match on this edge index or not?  No edges on return nodes
uint ReturnNode::match_edge(uint idx) const {
  return 0;
}


#ifndef PRODUCT
void ReturnNode::dump_req() const {
  // Dump the required inputs, marking where the return value begins
  uint i;                       // Exit value of loop
  for( i=0; i<req(); i++ ) {    // For all required inputs
    if( i == TypeFunc::Parms ) tty->print("returns");
    if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    else tty->print("_ ");
  }
}
#endif

//=============================================================================
RethrowNode::RethrowNode(
  Node* cntrl,
  Node* i_o,
  Node* memory,
  Node* frameptr,
  Node* ret_adr,
  Node* exception
) : Node(TypeFunc::Parms + 1) {
  init_req(TypeFunc::Control  , cntrl    );
  init_req(TypeFunc::I_O      , i_o      );
  init_req(TypeFunc::Memory   , memory   );
  init_req(TypeFunc::FramePtr , frameptr );
  init_req(TypeFunc::ReturnAdr, ret_adr);
  init_req(TypeFunc::Parms    , exception);
}

Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

const Type *RethrowNode::Value( PhaseTransform *phase ) const {
  return (phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

uint RethrowNode::match_edge(uint idx) const {
  return 0;
}

#ifndef PRODUCT
void RethrowNode::dump_req() const {
  // Dump the required inputs, marking where the exception begins
  uint i;                       // Exit value of loop
  for( i=0; i<req(); i++ ) {    // For all required inputs
    if( i == TypeFunc::Parms ) tty->print("exception");
    if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    else tty->print("_ ");
  }
}
#endif

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & method
uint TailCallNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx  &&  idx <= TypeFunc::Parms+1;
}

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & oop
uint TailJumpNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx  &&  idx <= TypeFunc::Parms+1;
}

//=============================================================================
JVMState::JVMState(ciMethod* method, JVMState* caller) {
  assert(method != NULL, "must be valid call site");
  _method = method;
  _reexecute = Reexecute_Undefined;
  debug_only(_bci = -99);  // random garbage value
  debug_only(_map = (SafePointNode*)-1);
  _caller = caller;
  _depth  = 1 + (caller == NULL ? 0 : caller->depth());
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff + _method->max_locals();
  _monoff = _stkoff + _method->max_stack();
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}
JVMState::JVMState(int stack_size) {
  _method = NULL;
  _bci = InvocationEntryBci;
  _reexecute = Reexecute_Undefined;
  debug_only(_map = (SafePointNode*)-1);
  _caller = NULL;
  _depth  = 1;
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff;
  _monoff = _stkoff + stack_size;
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}
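
// Illustrative example (values assumed, not from the source): for a method
// with max_locals() == 2 and max_stack() == 3, the constructor above yields
//   _locoff = TypeFunc::Parms          // locals    at [Parms+0 .. Parms+1]
//   _stkoff = _locoff + 2              // JVM stack at [Parms+2 .. Parms+4]
//   _monoff = _stkoff + 3              // monitors would start here
//   _scloff = _endoff = _monoff        // no monitors or scalar objects yet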

//--------------------------------of_depth-------------------------------------
JVMState* JVMState::of_depth(int d) const {
  const JVMState* jvmp = this;
  assert(0 < d && (uint)d <= depth(), "oob");
  for (int skip = depth() - d; skip > 0; skip--) {
    jvmp = jvmp->caller();
  }
  assert(jvmp->depth() == (uint)d, "found the right one");
  return (JVMState*)jvmp;
}

//-----------------------------same_calls_as-----------------------------------
bool JVMState::same_calls_as(const JVMState* that) const {
  if (this == that)                    return true;
  if (this->depth() != that->depth())  return false;
  const JVMState* p = this;
  const JVMState* q = that;
  for (;;) {
    if (p->_method != q->_method)    return false;
    if (p->_method == NULL)          return true;   // bci is irrelevant
    if (p->_bci    != q->_bci)       return false;
    p = p->caller();
    q = q->caller();
    if (p == q)                      return true;
    assert(p != NULL && q != NULL, "depth check ensures we don't run off end");
  }
}

//------------------------------debug_start------------------------------------
uint JVMState::debug_start()  const {
  debug_only(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last");
  return of_depth(1)->locoff();
}

//-------------------------------debug_end-------------------------------------
uint JVMState::debug_end() const {
  debug_only(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last");
  return endoff();
}

//------------------------------debug_depth------------------------------------
uint JVMState::debug_depth() const {
  uint total = 0;
  for (const JVMState* jvmp = this; jvmp != NULL; jvmp = jvmp->caller()) {
    total += jvmp->debug_size();
  }
  return total;
}
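
// Illustrative note (an assumption about the layout, not asserted here): for
// an inlined chain the per-frame debug info sits oldest-frame-first on the
// SafePointNode, so the inputs [debug_start(), debug_end()) of the youngest
// JVMState span every frame, and debug_depth() sums those same edges one
// frame at a time.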

#ifndef PRODUCT

//------------------------------format_helper----------------------------------
// Given an allocation (a Chaitin object) and a Node decide if the Node carries
// any defined value or not.  If it does, print out the register or constant.
static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) {
  if (n == NULL) { st->print(" NULL"); return; }
  if (n->is_SafePointScalarObject()) {
    // Scalar replacement.
    SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject();
    scobjs->append_if_missing(spobj);
    int sco_n = scobjs->find(spobj);
    assert(sco_n >= 0, "");
    st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n);
    return;
  }
  if( OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
    char buf[50];
    regalloc->dump_register(n,buf);
    st->print(" %s%d]=%s",msg,i,buf);
  } else {                      // No register, but might be constant
    const Type *t = n->bottom_type();
    switch (t->base()) {
    case Type::Int:
      st->print(" %s%d]=#"INT32_FORMAT,msg,i,t->is_int()->get_con());
      break;
    case Type::AnyPtr:
      assert( t == TypePtr::NULL_PTR, "" );
      st->print(" %s%d]=#NULL",msg,i);
      break;
    case Type::AryPtr:
    case Type::KlassPtr:
    case Type::InstPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->isa_oopptr()->const_oop());
      break;
    case Type::NarrowOop:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->make_ptr()->isa_oopptr()->const_oop());
      break;
    case Type::RawPtr:
      st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,t->is_rawptr());
      break;
    case Type::DoubleCon:
      st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d);
      break;
    case Type::FloatCon:
      st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f);
      break;
    case Type::Long:
      st->print(" %s%d]=#"INT64_FORMAT,msg,i,t->is_long()->get_con());
      break;
    case Type::Half:
    case Type::Top:
      st->print(" %s%d]=_",msg,i);
      break;
    default: ShouldNotReachHere();
    }
  }
}

//------------------------------format-----------------------------------------
void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const {
  st->print(" #");
  if( _method ) {
    _method->print_short_name(st);
    st->print(" @ bci:%d ",_bci);
  } else {
    st->print_cr(" runtime stub ");
    return;
  }
  if (n->is_MachSafePoint()) {
    GrowableArray<SafePointScalarObjectNode*> scobjs;
    MachSafePointNode *mcall = n->as_MachSafePoint();
    uint i;
    // Print locals
    for( i = 0; i < (uint)loc_size(); i++ )
      format_helper( regalloc, st, mcall->local(this, i), "L[", i, &scobjs );
    // Print stack
    for (i = 0; i < (uint)stk_size(); i++) {
      if ((uint)(_stkoff + i) >= mcall->len())
        st->print(" oob ");
      else
        format_helper( regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs );
    }
    for (i = 0; (int)i < nof_monitors(); i++) {
      Node *box = mcall->monitor_box(this, i);
      Node *obj = mcall->monitor_obj(this, i);
      if ( OptoReg::is_valid(regalloc->get_reg_first(box)) ) {
        while( !box->is_BoxLock() )  box = box->in(1);
        format_helper( regalloc, st, box, "MON-BOX[", i, &scobjs );
      } else {
        OptoReg::Name box_reg = BoxLockNode::stack_slot(box);
        st->print(" MON-BOX%d=%s+%d",
                   i,
                   OptoReg::regname(OptoReg::c_frame_pointer),
                   regalloc->reg2offset(box_reg));
      }
      const char* obj_msg = "MON-OBJ[";
      if (EliminateLocks) {
        while( !box->is_BoxLock() )  box = box->in(1);
        if (box->as_BoxLock()->is_eliminated())
          obj_msg = "MON-OBJ(LOCK ELIMINATED)[";
      }
      format_helper( regalloc, st, obj, obj_msg, i, &scobjs );
    }

    for (i = 0; i < (uint)scobjs.length(); i++) {
      // Scalar replaced objects.
      st->print_cr("");
      st->print(" # ScObj" INT32_FORMAT " ", i);
      SafePointScalarObjectNode* spobj = scobjs.at(i);
      ciKlass* cik = spobj->bottom_type()->is_oopptr()->klass();
      assert(cik->is_instance_klass() ||
             cik->is_array_klass(), "Not supported allocation.");
      ciInstanceKlass *iklass = NULL;
      if (cik->is_instance_klass()) {
        cik->print_name_on(st);
        iklass = cik->as_instance_klass();
      } else if (cik->is_type_array_klass()) {
        cik->as_array_klass()->base_element_type()->print_name_on(st);
        st->print("[%d]=", spobj->n_fields());
      } else if (cik->is_obj_array_klass()) {
        ciType* cie = cik->as_array_klass()->base_element_type();
        int ndim = 1;
        while (cie->is_obj_array_klass()) {
          ndim += 1;
          cie = cie->as_array_klass()->base_element_type();
        }
        cie->print_name_on(st);
        while (ndim-- > 0) {
          st->print("[]");
        }
        st->print("[%d]=", spobj->n_fields());
      }
      st->print("{");
      uint nf = spobj->n_fields();
      if (nf > 0) {
        uint first_ind = spobj->first_index();
        Node* fld_node = mcall->in(first_ind);
        ciField* cifield;
        if (iklass != NULL) {
          st->print(" [");
          cifield = iklass->nonstatic_field_at(0);
          cifield->print_name_on(st);
          format_helper( regalloc, st, fld_node, ":", 0, &scobjs );
        } else {
          format_helper( regalloc, st, fld_node, "[", 0, &scobjs );
        }
        for (uint j = 1; j < nf; j++) {
          fld_node = mcall->in(first_ind+j);
          if (iklass != NULL) {
            st->print(", [");
            cifield = iklass->nonstatic_field_at(j);
            cifield->print_name_on(st);
            format_helper( regalloc, st, fld_node, ":", j, &scobjs );
          } else {
            format_helper( regalloc, st, fld_node, ", [", j, &scobjs );
          }
        }
      }
      st->print(" }");
    }
  }
  st->print_cr("");
  if (caller() != NULL)  caller()->format(regalloc, n, st);
}


void JVMState::dump_spec(outputStream *st) const {
  if (_method != NULL) {
    bool printed = false;
    if (!Verbose) {
      // The JVMS dumps make really, really long lines.
      // Take out the most boring parts, which are the package prefixes.
      char buf[500];
      stringStream namest(buf, sizeof(buf));
      _method->print_short_name(&namest);
      if (namest.count() < sizeof(buf)) {
        const char* name = namest.base();
        if (name[0] == ' ')  ++name;
        const char* endcn = strchr(name, ':');  // end of class name
        if (endcn == NULL)  endcn = strchr(name, '(');
        if (endcn == NULL)  endcn = name + strlen(name);
        while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/')
          --endcn;
        st->print(" %s", endcn);
        printed = true;
      }
    }
    if (!printed)
      _method->print_short_name(st);
    st->print(" @ bci:%d",_bci);
    st->print(" reexecute:%s", _reexecute==Reexecute_True?"true":"false");
  } else {
    st->print(" runtime stub");
  }
  if (caller() != NULL)  caller()->dump_spec(st);
}


void JVMState::dump_on(outputStream* st) const {
  if (_map && !((uintptr_t)_map & 1)) {
    if (_map->len() > _map->req()) {  // _map->has_exceptions()
      Node* ex = _map->in(_map->req());  // _map->next_exception()
      // skip the first one; it's already being printed
      while (ex != NULL && ex->len() > ex->req()) {
        ex = ex->in(ex->req());  // ex->next_exception()
        ex->dump(1);
      }
    }
    _map->dump(2);
  }
  st->print("JVMS depth=%d loc=%d stk=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=",
             depth(), locoff(), stkoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false");
  if (_method == NULL) {
    st->print_cr("(none)");
  } else {
    _method->print_name(st);
    st->cr();
    if (bci() >= 0 && bci() < _method->code_size()) {
      st->print(" bc: ");
      _method->print_codes_on(bci(), bci()+1, st);
    }
  }
  if (caller() != NULL) {
    caller()->dump_on(st);
  }
}

// Extra way to dump a jvms from the debugger,
// to avoid a bug with C++ member function calls.
void dump_jvms(JVMState* jvms) {
  jvms->dump();
}
#endif

//--------------------------clone_shallow--------------------------------------
JVMState* JVMState::clone_shallow(Compile* C) const {
  JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0);
  n->set_bci(_bci);
  n->_reexecute = _reexecute;
  n->set_locoff(_locoff);
  n->set_stkoff(_stkoff);
  n->set_monoff(_monoff);
  n->set_scloff(_scloff);
  n->set_endoff(_endoff);
  n->set_sp(_sp);
  n->set_map(_map);
  return n;
}

//---------------------------clone_deep----------------------------------------
JVMState* JVMState::clone_deep(Compile* C) const {
  JVMState* n = clone_shallow(C);
  for (JVMState* p = n; p->_caller != NULL; p = p->_caller) {
    p->_caller = p->_caller->clone_shallow(C);
  }
  assert(n->depth() == depth(), "sanity");
  assert(n->debug_depth() == debug_depth(), "sanity");
  return n;
}

//=============================================================================
uint CallNode::cmp( const Node &n ) const
{ return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
#ifndef PRODUCT
void CallNode::dump_req() const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for( i=0; i<req(); i++ ) {    // For all required inputs
    if( i == TypeFunc::Parms ) tty->print("(");
    if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    else tty->print("_ ");
  }
  tty->print(")");
}

void CallNode::dump_spec(outputStream *st) const {
  st->print(" ");
  tf()->dump_on(st);
  if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
  if (jvms() != NULL)  jvms()->dump_spec(st);
}
#endif

const Type *CallNode::bottom_type() const { return tf()->range(); }
const Type *CallNode::Value(PhaseTransform *phase) const {
  if (phase->type(in(0)) == Type::TOP)  return Type::TOP;
  return tf()->range();
}

//------------------------------calling_convention-----------------------------
void CallNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  // Use the standard compiler calling convention
  Matcher::calling_convention( sig_bt, parm_regs, argcnt, true );
}


//------------------------------match------------------------------------------
// Construct projections for control, I/O, memory-fields, ..., and
// return result(s) along with their RegMask info
Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new (match->C, 1) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);

  case TypeFunc::Parms+1:       // For LONG & DOUBLE returns
    assert(tf()->_range->field_at(TypeFunc::Parms+1) == Type::HALF, "");
    // 2nd half of doubles and longs
    return new (match->C, 1) MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);

  case TypeFunc::Parms: {       // Normal returns
    uint ideal_reg = Matcher::base2reg[tf()->range()->field_at(TypeFunc::Parms)->base()];
    OptoRegPair regs = is_CallRuntime()
      ? match->c_return_value(ideal_reg,true)  // Calls into C runtime
      : match->  return_value(ideal_reg,true); // Calls into compiled Java code
    RegMask rm = RegMask(regs.first());
    if( OptoReg::is_valid(regs.second()) )
      rm.Insert( regs.second() );
    return new (match->C, 1) MachProjNode(this,proj->_con,rm,ideal_reg);
  }

  case TypeFunc::ReturnAdr:
  case TypeFunc::FramePtr:
  default:
    ShouldNotReachHere();
  }
  return NULL;
}
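
// Illustrative note (not from this file): a call returning a Java long or
// double is modeled with two result projections.  Parms carries the value
// and is matched above to the calling convention's return register pair;
// Parms+1 carries Type::HALF and is matched to OptoReg::Bad, i.e. it never
// occupies a register of its own.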

// Do we Match on this edge index or not?  Match no edges
uint CallNode::match_edge(uint idx) const {
  return 0;
}

//
// Determine whether the call could modify the field of the specified
// instance at the specified offset.
//
bool CallNode::may_modify(const TypePtr *addr_t, PhaseTransform *phase) {
  const TypeOopPtr *adrInst_t = addr_t->isa_oopptr();

  // If not an OopPtr or not an instance type, assume the worst.
  // Note: currently this method is called only for instance types.
  if (adrInst_t == NULL || !adrInst_t->is_known_instance()) {
    return true;
  }
  // The instance_id is set only for scalar-replaceable allocations which
  // are not passed as arguments according to Escape Analysis.
  return false;
}

// Does this call have a direct reference to n other than debug information?
bool CallNode::has_non_debug_use(Node *n) {
  const TypeTuple * d = tf()->domain();
  for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
    Node *arg = in(i);
    if (arg == n) {
      return true;
    }
  }
  return false;
}

// Returns the unique CheckCastPP of a call,
// 'this' if there are several CheckCastPPs,
// or NULL if there is none.
Node *CallNode::result_cast() {
  Node *cast = NULL;

  Node *p = proj_out(TypeFunc::Parms);
  if (p == NULL)
    return NULL;

  for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
    Node *use = p->fast_out(i);
    if (use->is_CheckCastPP()) {
      if (cast != NULL) {
        return this;  // more than 1 CheckCastPP
      }
      cast = use;
    }
  }
  return cast;
}


//=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
uint CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  return CallNode::cmp(call) && _method == call._method;
}
#ifndef PRODUCT
void CallJavaNode::dump_spec(outputStream *st) const {
  if( _method ) _method->print_short_name(st);
  CallNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
uint CallStaticJavaNode::cmp( const Node &n ) const {
  CallStaticJavaNode &call = (CallStaticJavaNode&)n;
  return CallJavaNode::cmp(call);
}

//----------------------------uncommon_trap_request----------------------------
// If this is an uncommon trap, return the request code, else zero.
int CallStaticJavaNode::uncommon_trap_request() const {
  if (_name != NULL && !strcmp(_name, "uncommon_trap")) {
    return extract_uncommon_trap_request(this);
  }
  return 0;
}
int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
#ifndef PRODUCT
  if (!(call->req() > TypeFunc::Parms &&
        call->in(TypeFunc::Parms) != NULL &&
        call->in(TypeFunc::Parms)->is_Con())) {
    assert(_in_dump_cnt != 0, "OK if dumping");
    tty->print("[bad uncommon trap]");
    return 0;
  }
#endif
  return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
}
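
// Illustrative note (helper names from Deoptimization, usage assumed): the
// extracted constant packs a deopt reason and action into one int, so a
// caller could recover them with something like
//   Deoptimization::DeoptReason reason =
//       Deoptimization::trap_request_reason(uncommon_trap_request());
//   Deoptimization::DeoptAction action =
//       Deoptimization::trap_request_action(uncommon_trap_request());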

#ifndef PRODUCT
void CallStaticJavaNode::dump_spec(outputStream *st) const {
  st->print("# Static ");
  if (_name != NULL) {
    st->print("%s", _name);
    int trap_req = uncommon_trap_request();
    if (trap_req != 0) {
      char buf[100];
      st->print("(%s)",
                 Deoptimization::format_trap_request(buf, sizeof(buf),
                                                     trap_req));
    }
    st->print(" ");
  }
  CallJavaNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallDynamicJavaNode::size_of() const { return sizeof(*this); }
uint CallDynamicJavaNode::cmp( const Node &n ) const {
  CallDynamicJavaNode &call = (CallDynamicJavaNode&)n;
  return CallJavaNode::cmp(call);
}
#ifndef PRODUCT
void CallDynamicJavaNode::dump_spec(outputStream *st) const {
  st->print("# Dynamic ");
  CallJavaNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallRuntimeNode::size_of() const { return sizeof(*this); }
uint CallRuntimeNode::cmp( const Node &n ) const {
  CallRuntimeNode &call = (CallRuntimeNode&)n;
  return CallNode::cmp(call) && !strcmp(_name,call._name);
}
#ifndef PRODUCT
void CallRuntimeNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print(_name);
  CallNode::dump_spec(st);
}
#endif

//------------------------------calling_convention-----------------------------
void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  Matcher::c_calling_convention( sig_bt, parm_regs, argcnt );
}

//=============================================================================
//------------------------------calling_convention-----------------------------


//=============================================================================
#ifndef PRODUCT
void CallLeafNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print(_name);
  CallNode::dump_spec(st);
}
#endif

//=============================================================================

void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
  assert(verify_jvms(jvms), "jvms must match");
  int loc = jvms->locoff() + idx;
  if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
    // If current local idx is top then local idx - 1 could
    // be a long/double that needs to be killed since top could
    // represent the 2nd half of the long/double.
    uint ideal = in(loc -1)->ideal_reg();
    if (ideal == Op_RegD || ideal == Op_RegL) {
      // set other (low index) half to top
      set_req(loc - 1, in(loc));
    }
  }
  set_req(loc, c);
}
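
// Illustrative example (locals assumed): if local 0 holds a long, local 1 is
// its TOP second half.  Storing, say, an int into local 1 must also set
// local 0 to top, because the long no longer has both halves and keeping its
// low half alive in the debug info would be wrong.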

uint SafePointNode::size_of() const { return sizeof(*this); }
uint SafePointNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

//-------------------------set_next_exception----------------------------------
void SafePointNode::set_next_exception(SafePointNode* n) {
  assert(n == NULL || n->Opcode() == Op_SafePoint, "correct value for next_exception");
  if (len() == req()) {
    if (n != NULL)  add_prec(n);
  } else {
    set_prec(req(), n);
  }
}


//----------------------------next_exception-----------------------------------
SafePointNode* SafePointNode::next_exception() const {
  if (len() == req()) {
    return NULL;
  } else {
    Node* n = in(req());
    assert(n == NULL || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
    return (SafePointNode*) n;
  }
}


//------------------------------Ideal------------------------------------------
// Skip over any collapsed Regions
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

//------------------------------Identity---------------------------------------
// Remove obviously duplicate safepoints
Node *SafePointNode::Identity( PhaseTransform *phase ) {

  // If you have back to back safepoints, remove one
  if( in(TypeFunc::Control)->is_SafePoint() )
    return in(TypeFunc::Control);

  if( in(0)->is_Proj() ) {
    Node *n0 = in(0)->in(0);
    // Check if it is a call projection (except Leaf Call)
    if( n0->is_Catch() ) {
      n0 = n0->in(0)->in(0);
      assert( n0->is_Call(), "expect a call here" );
    }
    if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) {
      // Useless Safepoint, so remove it
      return in(TypeFunc::Control);
    }
  }

  return this;
}

//------------------------------Value------------------------------------------
const Type *SafePointNode::Value( PhaseTransform *phase ) const {
  if( phase->type(in(0)) == Type::TOP ) return Type::TOP;
  if( phase->eqv( in(0), this ) ) return Type::TOP; // Dead infinite loop
  return Type::CONTROL;
}

#ifndef PRODUCT
void SafePointNode::dump_spec(outputStream *st) const {
  st->print(" SafePoint ");
}
#endif

const RegMask &SafePointNode::in_RegMask(uint idx) const {
  if( idx < TypeFunc::Parms ) return RegMask::Empty;
  // Values outside the domain represent debug info
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}
const RegMask &SafePointNode::out_RegMask() const {
  return RegMask::Empty;
}


void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
  assert((int)grow_by > 0, "sanity");
  int monoff = jvms->monoff();
  int scloff = jvms->scloff();
  int endoff = jvms->endoff();
  assert(endoff == (int)req(), "no other states or debug info after me");
  Node* top = Compile::current()->top();
  for (uint i = 0; i < grow_by; i++) {
    ins_req(monoff, top);
  }
  jvms->set_monoff(monoff + grow_by);
  jvms->set_scloff(scloff + grow_by);
  jvms->set_endoff(endoff + grow_by);
}

void SafePointNode::push_monitor(const FastLockNode *lock) {
  // Add a LockNode, which points to both the original BoxLockNode (the
  // stack space for the monitor) and the Object being locked.
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  assert(req() == jvms()->endoff(), "correct sizing");
  int nextmon = jvms()->scloff();
  if (GenerateSynchronizationCode) {
    add_req(lock->box_node());
    add_req(lock->obj_node());
  } else {
    Node* top = Compile::current()->top();
    add_req(top);
    add_req(top);
  }
  jvms()->set_scloff(nextmon+MonitorEdges);
  jvms()->set_endoff(req());
}

void SafePointNode::pop_monitor() {
  // Delete last monitor from debug info
  debug_only(int num_before_pop = jvms()->nof_monitors());
  const int MonitorEdges = (1<<JVMState::logMonitorEdges);
  int scloff = jvms()->scloff();
  int endoff = jvms()->endoff();
  int new_scloff = scloff - MonitorEdges;
  int new_endoff = endoff - MonitorEdges;
  jvms()->set_scloff(new_scloff);
  jvms()->set_endoff(new_endoff);
  while (scloff > new_scloff)  del_req(--scloff);
  assert(jvms()->nof_monitors() == num_before_pop-1, "");
}
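
// Illustrative layout (two pushes assumed): each push_monitor() appends a
// (BoxLock, Obj) pair at the end of the inputs, so the monitor section reads
//   [ box0, obj0, box1, obj1 ]
// with scloff/endoff advanced by MonitorEdges per push; pop_monitor() trims
// exactly one such pair off the tail.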

Node *SafePointNode::peek_monitor_box() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_box(jvms(), mon);
}

Node *SafePointNode::peek_monitor_obj() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_obj(jvms(), mon);
}

// Do we Match on this edge index or not?  Match no edges
uint SafePointNode::match_edge(uint idx) const {
  if( !needs_polling_address_input() )
    return 0;

  return (TypeFunc::Parms == idx);
}

//==============  SafePointScalarObjectNode  ==============

SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                                                     AllocateNode* alloc,
#endif
                                                     uint first_index,
                                                     uint n_fields) :
  TypeNode(tp, 1), // 1 control input -- seems required.  Get from root.
#ifdef ASSERT
  _alloc(alloc),
#endif
  _first_index(first_index),
  _n_fields(n_fields)
{
  init_class_id(Class_SafePointScalarObject);
}

bool SafePointScalarObjectNode::pinned() const { return true; }
bool SafePointScalarObjectNode::depends_only_on_test() const { return false; }

uint SafePointScalarObjectNode::ideal_reg() const {
  return 0; // No matching to machine instruction
}

const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}

const RegMask &SafePointScalarObjectNode::out_RegMask() const {
  return RegMask::Empty;
}

uint SafePointScalarObjectNode::match_edge(uint idx) const {
  return 0;
}

SafePointScalarObjectNode*
SafePointScalarObjectNode::clone(int jvms_adj, Dict* sosn_map) const {
  void* cached = (*sosn_map)[(void*)this];
  if (cached != NULL) {
    return (SafePointScalarObjectNode*)cached;
  }
  Compile* C = Compile::current();
  SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
  res->_first_index += jvms_adj;
  sosn_map->Insert((void*)this, (void*)res);
  return res;
}


#ifndef PRODUCT
void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
  st->print(" # fields@[%d..%d]", first_index(),
             first_index() + n_fields() - 1);
}

#endif

//=============================================================================
uint AllocateNode::size_of() const { return sizeof(*this); }

AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
                           Node *ctrl, Node *mem, Node *abio,
                           Node *size, Node *klass_node, Node *initial_test)
  : CallNode(atype, NULL, TypeRawPtr::BOTTOM)
{
  init_class_id(Class_Allocate);
  init_flags(Flag_is_macro);
  _is_scalar_replaceable = false;
  Node *topnode = C->top();

  init_req( TypeFunc::Control  , ctrl );
  init_req( TypeFunc::I_O      , abio );
  init_req( TypeFunc::Memory   , mem );
  init_req( TypeFunc::ReturnAdr, topnode );
  init_req( TypeFunc::FramePtr , topnode );
  init_req( AllocSize          , size);
  init_req( KlassNode          , klass_node);
  init_req( InitialTest        , initial_test);
  init_req( ALength            , topnode);
  C->add_macro_node(this);
}

//=============================================================================
uint AllocateArrayNode::size_of() const { return sizeof(*this); }

Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape))  return this;

  const Type* type = phase->type(Ideal_length());
  if (type->isa_int() && type->is_int()->_hi < 0) {
    if (can_reshape) {
      PhaseIterGVN *igvn = phase->is_IterGVN();
      // Unreachable fall through path (negative array length),
      // the allocation can only throw so disconnect it.
      Node* proj = proj_out(TypeFunc::Control);
      Node* catchproj = NULL;
      if (proj != NULL) {
        for (DUIterator_Fast imax, i = proj->fast_outs(imax); i < imax; i++) {
          Node *cn = proj->fast_out(i);
          if (cn->is_Catch()) {
            catchproj = cn->as_Multi()->proj_out(CatchProjNode::fall_through_index);
            break;
          }
        }
      }
      if (catchproj != NULL && catchproj->outcnt() > 0 &&
          (catchproj->outcnt() > 1 ||
           catchproj->unique_out()->Opcode() != Op_Halt)) {
        assert(catchproj->is_CatchProj(), "must be a CatchProjNode");
        Node* nproj = catchproj->clone();
        igvn->register_new_node_with_optimizer(nproj);

        Node *frame = new (phase->C, 1) ParmNode( phase->C->start(), TypeFunc::FramePtr );
        frame = phase->transform(frame);
        // Halt & Catch Fire
        Node *halt = new (phase->C, TypeFunc::Parms) HaltNode( nproj, frame );
        phase->C->root()->add_req(halt);
        phase->transform(halt);

        igvn->replace_node(catchproj, phase->C->top());
        return this;
      }
    } else {
      // Can't correct it during regular GVN so register for IGVN
      phase->C->record_for_igvn(this);
    }
  }
  return NULL;
}

// Retrieve the length from the AllocateArrayNode.  Narrow the type with a
// CastII, if appropriate.  If we are not allowed to create new nodes, and
// a CastII is appropriate, return NULL.
Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTransform *phase, bool allow_new_nodes) {
  Node *length = in(AllocateNode::ALength);
  assert(length != NULL, "length is not null");

  const TypeInt* length_type = phase->find_int_type(length);
  const TypeAryPtr* ary_type = oop_type->isa_aryptr();

  if (ary_type != NULL && length_type != NULL) {
    const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
    if (narrow_length_type != length_type) {
      // Assert one of:
      //   - the narrow_length is 0
      //   - the narrow_length is not wider than length
      assert(narrow_length_type == TypeInt::ZERO ||
             (narrow_length_type->_hi <= length_type->_hi &&
              narrow_length_type->_lo >= length_type->_lo),
             "narrow type must be narrower than length type");

      // Return NULL if new nodes are not allowed
      if (!allow_new_nodes)  return NULL;
      // Create a cast which is control dependent on the initialization to
      // propagate the fact that the array length must be positive.
      length = new (phase->C, 2) CastIINode(length, narrow_length_type);
      length->set_req(0, initialization()->proj_out(0));
    }
  }

  return length;
}
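
// Illustrative example (ranges assumed): if the raw length input is only
// known to be an int in [-4, 100], but on the fall-through path the
// allocation has succeeded, narrow_size_type() can tighten it to [0, 100];
// the CastII created above carries that narrowed type and is pinned on the
// initialization's control projection so it only applies after the
// allocation succeeds.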

//=============================================================================
uint LockNode::size_of() const { return sizeof(*this); }

// Redundant lock elimination
//
// There are various patterns of locking where we release and
// immediately reacquire a lock in a piece of code where no operations
// occur in between that would be observable.  In those cases we can
// skip releasing and reacquiring the lock without violating any
// fairness requirements.  Doing this around a loop could cause a lock
// to be held for a very long time so we concentrate on non-looping
// control flow.  We also require that the operations are fully
// redundant meaning that we don't introduce new lock operations on
// some paths so as to be able to eliminate them on others, as in PRE.
// This would probably require some more extensive graph manipulation
// to guarantee that the memory edges were all handled correctly.
//
// Assuming p is a simple predicate which can't trap in any way and s
// is a synchronized method consider this code:
//
//   s();
//   if (p)
//     s();
//   else
//     s();
//   s();
//
// 1. The unlocks of the first call to s can be eliminated if the
// locks inside the then and else branches are eliminated.
//
// 2. The unlocks of the then and else branches can be eliminated if
// the lock of the final call to s is eliminated.
//
// Either of these cases subsumes the simple case of sequential control flow
//
// Additionally we can eliminate versions without the else case:
//
//   s();
//   if (p)
//     s();
//   s();
//
// 3. In this case we eliminate the unlock of the first s, the lock
// and unlock in the then case and the lock in the final s.
//
// Note also that in all these cases the then/else pieces don't have
// to be trivial as long as they begin and end with synchronization
// operations.
//
//   s();
//   if (p)
//     s();
//   f();
//   s();
//   s();
//
// The code will work properly for this case, leaving in the unlock
// before the call to f and the relock after it.
//
// A potentially interesting case which isn't handled here is when the
// locking is partially redundant.
//
//   s();
//   if (p)
//     s();
//
// This could be eliminated by putting unlocking on the else case and
// eliminating the first unlock and the lock in the then side.
// Alternatively the unlock could be moved out of the then side so it
// was after the merge and the first unlock and second lock
// eliminated.  This might require less manipulation of the memory
// state to get correct.
//
// Additionally we might allow work between an unlock and lock before
// giving up eliminating the locks.  The current code disallows any
// conditional control flow between these operations.  A formulation
// similar to partial redundancy elimination computing the
// availability of unlocking and the anticipatability of locking at a
// program point would allow detection of fully redundant locking with
// some amount of work in between.  I'm not sure how often I really
// think that would occur though.  Most of the cases I've seen
// indicate it's likely non-trivial work would occur in between.
// There may be other more complicated constructs where we could
// eliminate locking but I haven't seen any others appear as hot or
// interesting.
//
// Locking and unlocking have a canonical form in ideal that looks
// roughly like this:
//
//              <obj>
//                | \\------+
//                |  \       \
//                |  BoxLock   \
//                |  |   |      \
//                |  |    \      \
//                |  |    FastLock
//                |  |   /
//                |  |  /
//                |  |  |
//
//               Lock
//                |
//            Proj #0
//                |
//            MembarAcquire
//                |
//            Proj #0
//
//            MembarRelease
//                |
//            Proj #0
//                |
//              Unlock
//                |
//            Proj #0
//
//
// This code proceeds by processing Lock nodes during PhaseIterGVN
// and searching back through its control for the proper code
// patterns.  Once it finds a set of lock and unlock operations to
// eliminate they are marked as eliminatable which causes the
// expansion of the Lock and Unlock macro nodes to make the operation a NOP
//
//=============================================================================

//
// Utility function to skip over uninteresting control nodes.  Nodes skipped are:
//  - copy regions.  (These may not have been optimized away yet.)
//  - eliminated locking nodes
//
static Node *next_control(Node *ctrl) {
  if (ctrl == NULL)
    return NULL;
  while (1) {
    if (ctrl->is_Region()) {
      RegionNode *r = ctrl->as_Region();
      Node *n = r->is_copy();
      if (n == NULL)
        break;  // hit a region, return it
      else
        ctrl = n;
    } else if (ctrl->is_Proj()) {
      Node *in0 = ctrl->in(0);
      if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) {
        ctrl = in0->in(0);
      } else {
        break;
      }
    } else {
      break; // found an interesting control
    }
  }
  return ctrl;
}
//
// Given a control, see if it's the control projection of an Unlock which
// operates on the same object as lock.
//
bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
                                            GrowableArray<AbstractLockNode*> &lock_ops) {
  ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : NULL;
  if (ctrl_proj != NULL && ctrl_proj->_con == TypeFunc::Control) {
    Node *n = ctrl_proj->in(0);
    if (n != NULL && n->is_Unlock()) {
      UnlockNode *unlock = n->as_Unlock();
      if ((lock->obj_node() == unlock->obj_node()) &&
          (lock->box_node() == unlock->box_node()) && !unlock->is_eliminated()) {
        lock_ops.append(unlock);
        return true;
      }
    }
  }
  return false;
}

//
// Find the lock matching an unlock.  Returns null if a safepoint
// or complicated control is encountered first.
LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
  LockNode *lock_result = NULL;
  // find the matching lock, or an intervening safepoint
  Node *ctrl = next_control(unlock->in(0));
  while (1) {
    assert(ctrl != NULL, "invalid control graph");
    assert(!ctrl->is_Start(), "missing lock for unlock");
    if (ctrl->is_top())  break;  // dead control path
    if (ctrl->is_Proj())  ctrl = ctrl->in(0);
    if (ctrl->is_SafePoint()) {
      break;  // found a safepoint (may be the lock we are searching for)
    } else if (ctrl->is_Region()) {
      // Check for a simple diamond pattern.  Punt on anything more complicated
      if (ctrl->req() == 3 && ctrl->in(1) != NULL && ctrl->in(2) != NULL) {
        Node *in1 = next_control(ctrl->in(1));
        Node *in2 = next_control(ctrl->in(2));
        if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
             (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
          ctrl = next_control(in1->in(0)->in(0));
        } else {
          break;
        }
      } else {
        break;
      }
    } else {
      ctrl = next_control(ctrl->in(0));  // keep searching
    }
  }
  if (ctrl->is_Lock()) {
    LockNode *lock = ctrl->as_Lock();
    if ((lock->obj_node() == unlock->obj_node()) &&
        (lock->box_node() == unlock->box_node())) {
      lock_result = lock;
    }
  }
  return lock_result;
}
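
// Illustrative control shape (a sketch, not from the source): the walk above
// tolerates one simple diamond per step,
//
//              If
//             /  \
//        IfTrue  IfFalse
//             \  /
//            Region   <- current ctrl
//
// checking both arms through next_control(); any merge that is not such a
// two-way diamond stops the search.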

// This code corresponds to case 3 above.

bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                                       GrowableArray<AbstractLockNode*> &lock_ops) {
  Node* if_node = node->in(0);
  bool  if_true = node->is_IfTrue();

  if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
    Node *lock_ctrl = next_control(if_node->in(0));
    if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
      Node* lock1_node = NULL;
      ProjNode* proj = if_node->as_If()->proj_out(!if_true);
      if (if_true) {
        if (proj->is_IfFalse() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      } else {
        if (proj->is_IfTrue() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      }
      if (lock1_node != NULL && lock1_node->is_Lock()) {
        LockNode *lock1 = lock1_node->as_Lock();
        if ((lock->obj_node() == lock1->obj_node()) &&
            (lock->box_node() == lock1->box_node()) && !lock1->is_eliminated()) {
          lock_ops.append(lock1);
          return true;
        }
      }
    }
  }

  lock_ops.trunc_to(0);
  return false;
}

bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                                               GrowableArray<AbstractLockNode*> &lock_ops) {
  // check each control merging at this point for a matching unlock.
  // in(0) should be self edge so skip it.
  for (int i = 1; i < (int)region->req(); i++) {
    Node *in_node = next_control(region->in(i));
    if (in_node != NULL) {
      if (find_matching_unlock(in_node, lock, lock_ops)) {
        // found a match so keep on checking.
        continue;
      } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) {
        continue;
      }

      // If we fall through to here then it was some kind of node we
      // don't understand or there wasn't a matching unlock, so give
      // up trying to merge locks.
      lock_ops.trunc_to(0);
      return false;
    }
  }
  return true;

}

#ifndef PRODUCT
//
// Create a counter which counts the number of times this lock is acquired
//
void AbstractLockNode::create_lock_counter(JVMState* state) {
  _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter);
}
#endif

void AbstractLockNode::set_eliminated() {
  _eliminate = true;
#ifndef PRODUCT
  if (_counter) {
    // Update the counter to indicate that this lock was eliminated.
    // The counter update code will stay around even though the
    // optimizer will eliminate the lock operation itself.
    _counter->set_tag(NamedCounter::EliminatedLockCounter);
  }
#endif
}

//=============================================================================
Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or NULL)
  Node *result = SafePointNode::Ideal(phase, can_reshape);

  // Now see if we can optimize away this lock.  We don't actually
  // remove the locking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the lock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  if (result == NULL && can_reshape && EliminateLocks && !is_eliminated()) {
    //
    // If we are locking an unescaped object, the lock/unlock is unnecessary
    //
    ConnectionGraph *cgr = phase->C->congraph();
    PointsToNode::EscapeState es = PointsToNode::GlobalEscape;
    if (cgr != NULL)
      es = cgr->escape_state(obj_node(), phase);
    if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
      // Mark it eliminated to update any counters
      this->set_eliminated();
      return result;
    }

    //
    // Try lock coarsening
    //
    PhaseIterGVN* iter = phase->is_IterGVN();
    if (iter != NULL) {

      GrowableArray<AbstractLockNode*> lock_ops;

      Node *ctrl = next_control(in(0));

      // now search back for a matching Unlock
      if (find_matching_unlock(ctrl, this, lock_ops)) {
        // found an unlock directly preceding this lock.  This is the
        // case of single unlock directly control dependent on a
        // single lock which is the trivial version of case 1 or 2.
      } else if (ctrl->is_Region() ) {
        if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) {
          // found lock preceded by multiple unlocks along all paths
          // joining at this point which is case 3 in description above.
        }
      } else {
        // See if this lock comes from either half of an if; the
        // predecessor merges unlocks and the other half of the if
        // performs a lock.
        if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) {
          // found unlock splitting to an if with locks on both branches.
        }
      }

      if (lock_ops.length() > 0) {
        // add ourselves to the list of locks to be eliminated.
        lock_ops.append(this);

#ifndef PRODUCT
        if (PrintEliminateLocks) {
          int locks = 0;
          int unlocks = 0;
          for (int i = 0; i < lock_ops.length(); i++) {
            AbstractLockNode* lock = lock_ops.at(i);
            if (lock->Opcode() == Op_Lock)
              locks++;
            else
              unlocks++;
            if (Verbose) {
              lock->dump(1);
            }
          }
          tty->print_cr("***Eliminated %d unlocks and %d locks", unlocks, locks);
        }
#endif

        // for each of the identified locks, mark them
        // as eliminatable
        for (int i = 0; i < lock_ops.length(); i++) {
          AbstractLockNode* lock = lock_ops.at(i);

          // Mark it eliminated to update any counters
          lock->set_eliminated();
          lock->set_coarsened();
        }
      } else if (result != NULL && ctrl->is_Region() &&
                 iter->_worklist.member(ctrl)) {
        // We weren't able to find any opportunities but the region this
        // lock is control dependent on hasn't been processed yet so put
        // this lock back on the worklist so we can check again once any
        // region simplification has occurred.
        iter->_worklist.push(this);
      }
    }
  }

  return result;
}

//=============================================================================
uint UnlockNode::size_of() const { return sizeof(*this); }

//=============================================================================
Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or NULL)
  Node *result = SafePointNode::Ideal(phase, can_reshape);

  // Now see if we can optimize away this unlock.  We don't actually
  // remove the unlocking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the unlock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  // Escape state is defined after Parse phase.
  if (result == NULL && can_reshape && EliminateLocks && !is_eliminated()) {
    //
    // If we are unlocking an unescaped object, the lock/unlock is unnecessary.
    //
    ConnectionGraph *cgr = phase->C->congraph();
    PointsToNode::EscapeState es = PointsToNode::GlobalEscape;
    if (cgr != NULL)
      es = cgr->escape_state(obj_node(), phase);
    if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
      // Mark it eliminated to update any counters
      this->set_eliminated();
    }
  }
  return result;
}