1 /* 2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "ci/bcEscapeAnalyzer.hpp" 27 #include "compiler/oopMap.hpp" 28 #include "opto/callnode.hpp" 29 #include "opto/escape.hpp" 30 #include "opto/locknode.hpp" 31 #include "opto/machnode.hpp" 32 #include "opto/matcher.hpp" 33 #include "opto/parse.hpp" 34 #include "opto/regalloc.hpp" 35 #include "opto/regmask.hpp" 36 #include "opto/rootnode.hpp" 37 #include "opto/runtime.hpp" 38 39 // Portions of code courtesy of Clifford Click 40 41 // Optimization - Graph Style 42 43 //============================================================================= 44 uint StartNode::size_of() const { return sizeof(*this); } 45 uint StartNode::cmp( const Node &n ) const 46 { return _domain == ((StartNode&)n)._domain; } 47 const Type *StartNode::bottom_type() const { return _domain; } 48 const Type *StartNode::Value(PhaseTransform *phase) const { return _domain; } 49 #ifndef PRODUCT 50 void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);} 51 #endif 52 53 //------------------------------Ideal------------------------------------------ 54 Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){ 55 return remove_dead_region(phase, can_reshape) ? 
this : NULL; 56 } 57 58 //------------------------------calling_convention----------------------------- 59 void StartNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const { 60 Matcher::calling_convention( sig_bt, parm_regs, argcnt, false ); 61 } 62 63 //------------------------------Registers-------------------------------------- 64 const RegMask &StartNode::in_RegMask(uint) const { 65 return RegMask::Empty; 66 } 67 68 //------------------------------match------------------------------------------ 69 // Construct projections for incoming parameters, and their RegMask info 70 Node *StartNode::match( const ProjNode *proj, const Matcher *match ) { 71 switch (proj->_con) { 72 case TypeFunc::Control: 73 case TypeFunc::I_O: 74 case TypeFunc::Memory: 75 return new (match->C, 1) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj); 76 case TypeFunc::FramePtr: 77 return new (match->C, 1) MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP); 78 case TypeFunc::ReturnAdr: 79 return new (match->C, 1) MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP); 80 case TypeFunc::Parms: 81 default: { 82 uint parm_num = proj->_con - TypeFunc::Parms; 83 const Type *t = _domain->field_at(proj->_con); 84 if (t->base() == Type::Half) // 2nd half of Longs and Doubles 85 return new (match->C, 1) ConNode(Type::TOP); 86 uint ideal_reg = Matcher::base2reg[t->base()]; 87 RegMask &rm = match->_calling_convention_mask[parm_num]; 88 return new (match->C, 1) MachProjNode(this,proj->_con,rm,ideal_reg); 89 } 90 } 91 return NULL; 92 } 93 94 //------------------------------StartOSRNode---------------------------------- 95 // The method start node for an on stack replacement adapter 96 97 //------------------------------osr_domain----------------------------- 98 const TypeTuple *StartOSRNode::osr_domain() { 99 const Type **fields = TypeTuple::fields(2); 100 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // address of osr buffer 101 102 return TypeTuple::make(TypeFunc::Parms+1, fields); 103 } 104 105 //============================================================================= 106 const char * const ParmNode::names[TypeFunc::Parms+1] = { 107 "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms" 108 }; 109 110 #ifndef PRODUCT 111 void ParmNode::dump_spec(outputStream *st) const { 112 if( _con < TypeFunc::Parms ) { 113 st->print(names[_con]); 114 } else { 115 st->print("Parm%d: ",_con-TypeFunc::Parms); 116 // Verbose and WizardMode dump bottom_type for all nodes 117 if( !Verbose && !WizardMode ) bottom_type()->dump_on(st); 118 } 119 } 120 #endif 121 122 uint ParmNode::ideal_reg() const { 123 switch( _con ) { 124 case TypeFunc::Control : // fall through 125 case TypeFunc::I_O : // fall through 126 case TypeFunc::Memory : return 0; 127 case TypeFunc::FramePtr : // fall through 128 case TypeFunc::ReturnAdr: return Op_RegP; 129 default : assert( _con > TypeFunc::Parms, "" ); 130 // fall through 131 case TypeFunc::Parms : { 132 // Type of argument being passed 133 const Type *t = in(0)->as_Start()->_domain->field_at(_con); 134 return Matcher::base2reg[t->base()]; 135 } 136 } 137 ShouldNotReachHere(); 138 return 0; 139 } 140 141 //============================================================================= 142 ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) { 143 init_req(TypeFunc::Control,cntrl); 144 init_req(TypeFunc::I_O,i_o); 145 init_req(TypeFunc::Memory,memory); 146 
init_req(TypeFunc::FramePtr,frameptr); 147 init_req(TypeFunc::ReturnAdr,retadr); 148 } 149 150 Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){ 151 return remove_dead_region(phase, can_reshape) ? this : NULL; 152 } 153 154 const Type *ReturnNode::Value( PhaseTransform *phase ) const { 155 return ( phase->type(in(TypeFunc::Control)) == Type::TOP) 156 ? Type::TOP 157 : Type::BOTTOM; 158 } 159 160 // Do we Match on this edge index or not? No edges on return nodes 161 uint ReturnNode::match_edge(uint idx) const { 162 return 0; 163 } 164 165 166 #ifndef PRODUCT 167 void ReturnNode::dump_req() const { 168 // Dump the required inputs, enclosed in '(' and ')' 169 uint i; // Exit value of loop 170 for( i=0; i<req(); i++ ) { // For all required inputs 171 if( i == TypeFunc::Parms ) tty->print("returns"); 172 if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx); 173 else tty->print("_ "); 174 } 175 } 176 #endif 177 178 //============================================================================= 179 RethrowNode::RethrowNode( 180 Node* cntrl, 181 Node* i_o, 182 Node* memory, 183 Node* frameptr, 184 Node* ret_adr, 185 Node* exception 186 ) : Node(TypeFunc::Parms + 1) { 187 init_req(TypeFunc::Control , cntrl ); 188 init_req(TypeFunc::I_O , i_o ); 189 init_req(TypeFunc::Memory , memory ); 190 init_req(TypeFunc::FramePtr , frameptr ); 191 init_req(TypeFunc::ReturnAdr, ret_adr); 192 init_req(TypeFunc::Parms , exception); 193 } 194 195 Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){ 196 return remove_dead_region(phase, can_reshape) ? this : NULL; 197 } 198 199 const Type *RethrowNode::Value( PhaseTransform *phase ) const { 200 return (phase->type(in(TypeFunc::Control)) == Type::TOP) 201 ? Type::TOP 202 : Type::BOTTOM; 203 } 204 205 uint RethrowNode::match_edge(uint idx) const { 206 return 0; 207 } 208 209 #ifndef PRODUCT 210 void RethrowNode::dump_req() const { 211 // Dump the required inputs, enclosed in '(' and ')' 212 uint i; // Exit value of loop 213 for( i=0; i<req(); i++ ) { // For all required inputs 214 if( i == TypeFunc::Parms ) tty->print("exception"); 215 if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx); 216 else tty->print("_ "); 217 } 218 } 219 #endif 220 221 //============================================================================= 222 // Do we Match on this edge index or not? Match only target address & method 223 uint TailCallNode::match_edge(uint idx) const { 224 return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1; 225 } 226 227 //============================================================================= 228 // Do we Match on this edge index or not? Match only target address & oop 229 uint TailJumpNode::match_edge(uint idx) const { 230 return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1; 231 } 232 233 //============================================================================= 234 JVMState::JVMState(ciMethod* method, JVMState* caller) { 235 assert(method != NULL, "must be valid call site"); 236 _method = method; 237 _reexecute = Reexecute_Undefined; 238 debug_only(_bci = -99); // random garbage value 239 debug_only(_map = (SafePointNode*)-1); 240 _caller = caller; 241 _depth = 1 + (caller == NULL ? 
0 : caller->depth()); 242 _locoff = TypeFunc::Parms; 243 _stkoff = _locoff + _method->max_locals(); 244 _monoff = _stkoff + _method->max_stack(); 245 _scloff = _monoff; 246 _endoff = _monoff; 247 _sp = 0; 248 } 249 JVMState::JVMState(int stack_size) { 250 _method = NULL; 251 _bci = InvocationEntryBci; 252 _reexecute = Reexecute_Undefined; 253 debug_only(_map = (SafePointNode*)-1); 254 _caller = NULL; 255 _depth = 1; 256 _locoff = TypeFunc::Parms; 257 _stkoff = _locoff; 258 _monoff = _stkoff + stack_size; 259 _scloff = _monoff; 260 _endoff = _monoff; 261 _sp = 0; 262 } 263 264 //--------------------------------of_depth------------------------------------- 265 JVMState* JVMState::of_depth(int d) const { 266 const JVMState* jvmp = this; 267 assert(0 < d && (uint)d <= depth(), "oob"); 268 for (int skip = depth() - d; skip > 0; skip--) { 269 jvmp = jvmp->caller(); 270 } 271 assert(jvmp->depth() == (uint)d, "found the right one"); 272 return (JVMState*)jvmp; 273 } 274 275 //-----------------------------same_calls_as----------------------------------- 276 bool JVMState::same_calls_as(const JVMState* that) const { 277 if (this == that) return true; 278 if (this->depth() != that->depth()) return false; 279 const JVMState* p = this; 280 const JVMState* q = that; 281 for (;;) { 282 if (p->_method != q->_method) return false; 283 if (p->_method == NULL) return true; // bci is irrelevant 284 if (p->_bci != q->_bci) return false; 285 if (p->_reexecute != q->_reexecute) return false; 286 p = p->caller(); 287 q = q->caller(); 288 if (p == q) return true; 289 assert(p != NULL && q != NULL, "depth check ensures we don't run off end"); 290 } 291 } 292 293 //------------------------------debug_start------------------------------------ 294 uint JVMState::debug_start() const { 295 debug_only(JVMState* jvmroot = of_depth(1)); 296 assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last"); 297 return of_depth(1)->locoff(); 298 } 299 300 //-------------------------------debug_end------------------------------------- 301 uint JVMState::debug_end() const { 302 debug_only(JVMState* jvmroot = of_depth(1)); 303 assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last"); 304 return endoff(); 305 } 306 307 //------------------------------debug_depth------------------------------------ 308 uint JVMState::debug_depth() const { 309 uint total = 0; 310 for (const JVMState* jvmp = this; jvmp != NULL; jvmp = jvmp->caller()) { 311 total += jvmp->debug_size(); 312 } 313 return total; 314 } 315 316 #ifndef PRODUCT 317 318 //------------------------------format_helper---------------------------------- 319 // Given an allocation (a Chaitin object) and a Node decide if the Node carries 320 // any defined value or not. If it does, print out the register or constant. 321 static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) { 322 if (n == NULL) { st->print(" NULL"); return; } 323 if (n->is_SafePointScalarObject()) { 324 // Scalar replacement. 
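    // A scalar-replaced value has no register or constant of its own.  Print a
    // "#ScObjN" reference and remember the node in 'scobjs' so that
    // JVMState::format() can emit the full "# ScObjN klass={ fields }" entry
    // for it further down.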
325 SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject(); 326 scobjs->append_if_missing(spobj); 327 int sco_n = scobjs->find(spobj); 328 assert(sco_n >= 0, ""); 329 st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n); 330 return; 331 } 332 if( OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined 333 char buf[50]; 334 regalloc->dump_register(n,buf); 335 st->print(" %s%d]=%s",msg,i,buf); 336 } else { // No register, but might be constant 337 const Type *t = n->bottom_type(); 338 switch (t->base()) { 339 case Type::Int: 340 st->print(" %s%d]=#"INT32_FORMAT,msg,i,t->is_int()->get_con()); 341 break; 342 case Type::AnyPtr: 343 assert( t == TypePtr::NULL_PTR, "" ); 344 st->print(" %s%d]=#NULL",msg,i); 345 break; 346 case Type::AryPtr: 347 case Type::KlassPtr: 348 case Type::InstPtr: 349 st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->isa_oopptr()->const_oop()); 350 break; 351 case Type::NarrowOop: 352 st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->make_ptr()->isa_oopptr()->const_oop()); 353 break; 354 case Type::RawPtr: 355 st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,t->is_rawptr()); 356 break; 357 case Type::DoubleCon: 358 st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d); 359 break; 360 case Type::FloatCon: 361 st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f); 362 break; 363 case Type::Long: 364 st->print(" %s%d]=#"INT64_FORMAT,msg,i,t->is_long()->get_con()); 365 break; 366 case Type::Half: 367 case Type::Top: 368 st->print(" %s%d]=_",msg,i); 369 break; 370 default: ShouldNotReachHere(); 371 } 372 } 373 } 374 375 //------------------------------format----------------------------------------- 376 void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const { 377 st->print(" #"); 378 if( _method ) { 379 _method->print_short_name(st); 380 st->print(" @ bci:%d ",_bci); 381 } else { 382 st->print_cr(" runtime stub "); 383 return; 384 } 385 if (n->is_MachSafePoint()) { 386 GrowableArray<SafePointScalarObjectNode*> scobjs; 387 MachSafePointNode *mcall = n->as_MachSafePoint(); 388 uint i; 389 // Print locals 390 for( i = 0; i < (uint)loc_size(); i++ ) 391 format_helper( regalloc, st, mcall->local(this, i), "L[", i, &scobjs ); 392 // Print stack 393 for (i = 0; i < (uint)stk_size(); i++) { 394 if ((uint)(_stkoff + i) >= mcall->len()) 395 st->print(" oob "); 396 else 397 format_helper( regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs ); 398 } 399 for (i = 0; (int)i < nof_monitors(); i++) { 400 Node *box = mcall->monitor_box(this, i); 401 Node *obj = mcall->monitor_obj(this, i); 402 if ( OptoReg::is_valid(regalloc->get_reg_first(box)) ) { 403 box = BoxLockNode::box_node(box); 404 format_helper( regalloc, st, box, "MON-BOX[", i, &scobjs ); 405 } else { 406 OptoReg::Name box_reg = BoxLockNode::reg(box); 407 st->print(" MON-BOX%d=%s+%d", 408 i, 409 OptoReg::regname(OptoReg::c_frame_pointer), 410 regalloc->reg2offset(box_reg)); 411 } 412 const char* obj_msg = "MON-OBJ["; 413 if (EliminateLocks) { 414 if (BoxLockNode::box_node(box)->is_eliminated()) 415 obj_msg = "MON-OBJ(LOCK ELIMINATED)["; 416 } 417 format_helper( regalloc, st, obj, obj_msg, i, &scobjs ); 418 } 419 420 for (i = 0; i < (uint)scobjs.length(); i++) { 421 // Scalar replaced objects. 
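      // Each object collected by format_helper() above gets its own line of
      // the form "# ScObjN <klass>={ field values }"; the field values are
      // printed with format_helper() too, so they may reference registers,
      // constants, or other ScObj entries.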
422 st->print_cr(""); 423 st->print(" # ScObj" INT32_FORMAT " ", i); 424 SafePointScalarObjectNode* spobj = scobjs.at(i); 425 ciKlass* cik = spobj->bottom_type()->is_oopptr()->klass(); 426 assert(cik->is_instance_klass() || 427 cik->is_array_klass(), "Not supported allocation."); 428 ciInstanceKlass *iklass = NULL; 429 if (cik->is_instance_klass()) { 430 cik->print_name_on(st); 431 iklass = cik->as_instance_klass(); 432 } else if (cik->is_type_array_klass()) { 433 cik->as_array_klass()->base_element_type()->print_name_on(st); 434 st->print("[%d]", spobj->n_fields()); 435 } else if (cik->is_obj_array_klass()) { 436 ciKlass* cie = cik->as_obj_array_klass()->base_element_klass(); 437 if (cie->is_instance_klass()) { 438 cie->print_name_on(st); 439 } else if (cie->is_type_array_klass()) { 440 cie->as_array_klass()->base_element_type()->print_name_on(st); 441 } else { 442 ShouldNotReachHere(); 443 } 444 st->print("[%d]", spobj->n_fields()); 445 int ndim = cik->as_array_klass()->dimension() - 1; 446 while (ndim-- > 0) { 447 st->print("[]"); 448 } 449 } 450 st->print("={"); 451 uint nf = spobj->n_fields(); 452 if (nf > 0) { 453 uint first_ind = spobj->first_index(); 454 Node* fld_node = mcall->in(first_ind); 455 ciField* cifield; 456 if (iklass != NULL) { 457 st->print(" ["); 458 cifield = iklass->nonstatic_field_at(0); 459 cifield->print_name_on(st); 460 format_helper( regalloc, st, fld_node, ":", 0, &scobjs ); 461 } else { 462 format_helper( regalloc, st, fld_node, "[", 0, &scobjs ); 463 } 464 for (uint j = 1; j < nf; j++) { 465 fld_node = mcall->in(first_ind+j); 466 if (iklass != NULL) { 467 st->print(", ["); 468 cifield = iklass->nonstatic_field_at(j); 469 cifield->print_name_on(st); 470 format_helper( regalloc, st, fld_node, ":", j, &scobjs ); 471 } else { 472 format_helper( regalloc, st, fld_node, ", [", j, &scobjs ); 473 } 474 } 475 } 476 st->print(" }"); 477 } 478 } 479 st->print_cr(""); 480 if (caller() != NULL) caller()->format(regalloc, n, st); 481 } 482 483 484 void JVMState::dump_spec(outputStream *st) const { 485 if (_method != NULL) { 486 bool printed = false; 487 if (!Verbose) { 488 // The JVMS dumps make really, really long lines. 489 // Take out the most boring parts, which are the package prefixes. 490 char buf[500]; 491 stringStream namest(buf, sizeof(buf)); 492 _method->print_short_name(&namest); 493 if (namest.count() < sizeof(buf)) { 494 const char* name = namest.base(); 495 if (name[0] == ' ') ++name; 496 const char* endcn = strchr(name, ':'); // end of class name 497 if (endcn == NULL) endcn = strchr(name, '('); 498 if (endcn == NULL) endcn = name + strlen(name); 499 while (endcn > name && endcn[-1] != '.' 
&& endcn[-1] != '/') 500 --endcn; 501 st->print(" %s", endcn); 502 printed = true; 503 } 504 } 505 if (!printed) 506 _method->print_short_name(st); 507 st->print(" @ bci:%d",_bci); 508 if(_reexecute == Reexecute_True) 509 st->print(" reexecute"); 510 } else { 511 st->print(" runtime stub"); 512 } 513 if (caller() != NULL) caller()->dump_spec(st); 514 } 515 516 517 void JVMState::dump_on(outputStream* st) const { 518 if (_map && !((uintptr_t)_map & 1)) { 519 if (_map->len() > _map->req()) { // _map->has_exceptions() 520 Node* ex = _map->in(_map->req()); // _map->next_exception() 521 // skip the first one; it's already being printed 522 while (ex != NULL && ex->len() > ex->req()) { 523 ex = ex->in(ex->req()); // ex->next_exception() 524 ex->dump(1); 525 } 526 } 527 _map->dump(2); 528 } 529 st->print("JVMS depth=%d loc=%d stk=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=", 530 depth(), locoff(), stkoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false"); 531 if (_method == NULL) { 532 st->print_cr("(none)"); 533 } else { 534 _method->print_name(st); 535 st->cr(); 536 if (bci() >= 0 && bci() < _method->code_size()) { 537 st->print(" bc: "); 538 _method->print_codes_on(bci(), bci()+1, st); 539 } 540 } 541 if (caller() != NULL) { 542 caller()->dump_on(st); 543 } 544 } 545 546 // Extra way to dump a jvms from the debugger, 547 // to avoid a bug with C++ member function calls. 548 void dump_jvms(JVMState* jvms) { 549 jvms->dump(); 550 } 551 #endif 552 553 //--------------------------clone_shallow-------------------------------------- 554 JVMState* JVMState::clone_shallow(Compile* C) const { 555 JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0); 556 n->set_bci(_bci); 557 n->_reexecute = _reexecute; 558 n->set_locoff(_locoff); 559 n->set_stkoff(_stkoff); 560 n->set_monoff(_monoff); 561 n->set_scloff(_scloff); 562 n->set_endoff(_endoff); 563 n->set_sp(_sp); 564 n->set_map(_map); 565 return n; 566 } 567 568 //---------------------------clone_deep---------------------------------------- 569 JVMState* JVMState::clone_deep(Compile* C) const { 570 JVMState* n = clone_shallow(C); 571 for (JVMState* p = n; p->_caller != NULL; p = p->_caller) { 572 p->_caller = p->_caller->clone_shallow(C); 573 } 574 assert(n->depth() == depth(), "sanity"); 575 assert(n->debug_depth() == debug_depth(), "sanity"); 576 return n; 577 } 578 579 //============================================================================= 580 uint CallNode::cmp( const Node &n ) const 581 { return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; } 582 #ifndef PRODUCT 583 void CallNode::dump_req() const { 584 // Dump the required inputs, enclosed in '(' and ')' 585 uint i; // Exit value of loop 586 for( i=0; i<req(); i++ ) { // For all required inputs 587 if( i == TypeFunc::Parms ) tty->print("("); 588 if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? 
' ' : 'o', in(i)->_idx); 589 else tty->print("_ "); 590 } 591 tty->print(")"); 592 } 593 594 void CallNode::dump_spec(outputStream *st) const { 595 st->print(" "); 596 tf()->dump_on(st); 597 if (_cnt != COUNT_UNKNOWN) st->print(" C=%f",_cnt); 598 if (jvms() != NULL) jvms()->dump_spec(st); 599 } 600 #endif 601 602 const Type *CallNode::bottom_type() const { return tf()->range(); } 603 const Type *CallNode::Value(PhaseTransform *phase) const { 604 if (phase->type(in(0)) == Type::TOP) return Type::TOP; 605 return tf()->range(); 606 } 607 608 //------------------------------calling_convention----------------------------- 609 void CallNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const { 610 // Use the standard compiler calling convention 611 Matcher::calling_convention( sig_bt, parm_regs, argcnt, true ); 612 } 613 614 615 //------------------------------match------------------------------------------ 616 // Construct projections for control, I/O, memory-fields, ..., and 617 // return result(s) along with their RegMask info 618 Node *CallNode::match( const ProjNode *proj, const Matcher *match ) { 619 switch (proj->_con) { 620 case TypeFunc::Control: 621 case TypeFunc::I_O: 622 case TypeFunc::Memory: 623 return new (match->C, 1) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj); 624 625 case TypeFunc::Parms+1: // For LONG & DOUBLE returns 626 assert(tf()->_range->field_at(TypeFunc::Parms+1) == Type::HALF, ""); 627 // 2nd half of doubles and longs 628 return new (match->C, 1) MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad); 629 630 case TypeFunc::Parms: { // Normal returns 631 uint ideal_reg = Matcher::base2reg[tf()->range()->field_at(TypeFunc::Parms)->base()]; 632 OptoRegPair regs = is_CallRuntime() 633 ? match->c_return_value(ideal_reg,true) // Calls into C runtime 634 : match-> return_value(ideal_reg,true); // Calls into compiled Java code 635 RegMask rm = RegMask(regs.first()); 636 if( OptoReg::is_valid(regs.second()) ) 637 rm.Insert( regs.second() ); 638 return new (match->C, 1) MachProjNode(this,proj->_con,rm,ideal_reg); 639 } 640 641 case TypeFunc::ReturnAdr: 642 case TypeFunc::FramePtr: 643 default: 644 ShouldNotReachHere(); 645 } 646 return NULL; 647 } 648 649 // Do we Match on this edge index or not? Match no edges 650 uint CallNode::match_edge(uint idx) const { 651 return 0; 652 } 653 654 // 655 // Determine whether the call could modify the field of the specified 656 // instance at the specified offset. 657 // 658 bool CallNode::may_modify(const TypePtr *addr_t, PhaseTransform *phase) { 659 const TypeOopPtr *adrInst_t = addr_t->isa_oopptr(); 660 661 // If not an OopPtr or not an instance type, assume the worst. 662 // Note: currently this method is called only for instance types. 663 if (adrInst_t == NULL || !adrInst_t->is_known_instance()) { 664 return true; 665 } 666 // The instance_id is set only for scalar-replaceable allocations which 667 // are not passed as arguments according to Escape Analysis. 668 return false; 669 } 670 671 // Does this call have a direct reference to n other than debug information? 672 bool CallNode::has_non_debug_use(Node *n) { 673 const TypeTuple * d = tf()->domain(); 674 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 675 Node *arg = in(i); 676 if (arg == n) { 677 return true; 678 } 679 } 680 return false; 681 } 682 683 // Returns the unique CheckCastPP of a call 684 // or 'this' if there are several CheckCastPP 685 // or returns NULL if there is no one. 
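//
// Schematically, this looks for the usual shape of a call or allocation
// result after parsing, e.g.:
//
//   CallStaticJava (or Allocate)
//       |
//   Proj #TypeFunc::Parms      -- the raw result
//       |
//   CheckCastPP                -- casts the result to its known type
//
// and returns that CheckCastPP when it is unique.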
686 Node *CallNode::result_cast() { 687 Node *cast = NULL; 688 689 Node *p = proj_out(TypeFunc::Parms); 690 if (p == NULL) 691 return NULL; 692 693 for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) { 694 Node *use = p->fast_out(i); 695 if (use->is_CheckCastPP()) { 696 if (cast != NULL) { 697 return this; // more than 1 CheckCastPP 698 } 699 cast = use; 700 } 701 } 702 return cast; 703 } 704 705 706 void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj) { 707 projs->fallthrough_proj = NULL; 708 projs->fallthrough_catchproj = NULL; 709 projs->fallthrough_ioproj = NULL; 710 projs->catchall_ioproj = NULL; 711 projs->catchall_catchproj = NULL; 712 projs->fallthrough_memproj = NULL; 713 projs->catchall_memproj = NULL; 714 projs->resproj = NULL; 715 projs->exobj = NULL; 716 717 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { 718 ProjNode *pn = fast_out(i)->as_Proj(); 719 if (pn->outcnt() == 0) continue; 720 switch (pn->_con) { 721 case TypeFunc::Control: 722 { 723 // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj 724 projs->fallthrough_proj = pn; 725 DUIterator_Fast jmax, j = pn->fast_outs(jmax); 726 const Node *cn = pn->fast_out(j); 727 if (cn->is_Catch()) { 728 ProjNode *cpn = NULL; 729 for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) { 730 cpn = cn->fast_out(k)->as_Proj(); 731 assert(cpn->is_CatchProj(), "must be a CatchProjNode"); 732 if (cpn->_con == CatchProjNode::fall_through_index) 733 projs->fallthrough_catchproj = cpn; 734 else { 735 assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index."); 736 projs->catchall_catchproj = cpn; 737 } 738 } 739 } 740 break; 741 } 742 case TypeFunc::I_O: 743 if (pn->_is_io_use) 744 projs->catchall_ioproj = pn; 745 else 746 projs->fallthrough_ioproj = pn; 747 for (DUIterator j = pn->outs(); pn->has_out(j); j++) { 748 Node* e = pn->out(j); 749 if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj()) { 750 assert(projs->exobj == NULL, "only one"); 751 projs->exobj = e; 752 } 753 } 754 break; 755 case TypeFunc::Memory: 756 if (pn->_is_io_use) 757 projs->catchall_memproj = pn; 758 else 759 projs->fallthrough_memproj = pn; 760 break; 761 case TypeFunc::Parms: 762 projs->resproj = pn; 763 break; 764 default: 765 assert(false, "unexpected projection from allocation node."); 766 } 767 } 768 769 // The resproj may not exist because the result couuld be ignored 770 // and the exception object may not exist if an exception handler 771 // swallows the exception but all the other must exist and be found. 
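  //
  // Roughly, the projections collected above hang off the call like this:
  //
  //   Call
  //    |-- Proj #Control --> Catch --> CatchProj (fall through)   fallthrough_catchproj
  //    |                           `-> CatchProj (catch all)      catchall_catchproj
  //    |-- Proj #I_O      normal use:  fallthrough_ioproj
  //    |                  _is_io_use:  catchall_ioproj  (a CreateEx user becomes exobj)
  //    |-- Proj #Memory   normal use:  fallthrough_memproj
  //    |                  _is_io_use:  catchall_memproj
  //    `-- Proj #Parms                 resproj
  //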
772 assert(projs->fallthrough_proj != NULL, "must be found"); 773 assert(projs->fallthrough_catchproj != NULL, "must be found"); 774 assert(projs->fallthrough_memproj != NULL, "must be found"); 775 assert(projs->fallthrough_ioproj != NULL, "must be found"); 776 assert(projs->catchall_catchproj != NULL, "must be found"); 777 if (separate_io_proj) { 778 assert(projs->catchall_memproj != NULL, "must be found"); 779 assert(projs->catchall_ioproj != NULL, "must be found"); 780 } 781 } 782 783 784 //============================================================================= 785 uint CallJavaNode::size_of() const { return sizeof(*this); } 786 uint CallJavaNode::cmp( const Node &n ) const { 787 CallJavaNode &call = (CallJavaNode&)n; 788 return CallNode::cmp(call) && _method == call._method; 789 } 790 #ifndef PRODUCT 791 void CallJavaNode::dump_spec(outputStream *st) const { 792 if( _method ) _method->print_short_name(st); 793 CallNode::dump_spec(st); 794 } 795 #endif 796 797 //============================================================================= 798 uint CallStaticJavaNode::size_of() const { return sizeof(*this); } 799 uint CallStaticJavaNode::cmp( const Node &n ) const { 800 CallStaticJavaNode &call = (CallStaticJavaNode&)n; 801 return CallJavaNode::cmp(call); 802 } 803 804 //----------------------------uncommon_trap_request---------------------------- 805 // If this is an uncommon trap, return the request code, else zero. 806 int CallStaticJavaNode::uncommon_trap_request() const { 807 if (_name != NULL && !strcmp(_name, "uncommon_trap")) { 808 return extract_uncommon_trap_request(this); 809 } 810 return 0; 811 } 812 int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) { 813 #ifndef PRODUCT 814 if (!(call->req() > TypeFunc::Parms && 815 call->in(TypeFunc::Parms) != NULL && 816 call->in(TypeFunc::Parms)->is_Con())) { 817 assert(_in_dump_cnt != 0, "OK if dumping"); 818 tty->print("[bad uncommon trap]"); 819 return 0; 820 } 821 #endif 822 return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con(); 823 } 824 825 #ifndef PRODUCT 826 void CallStaticJavaNode::dump_spec(outputStream *st) const { 827 st->print("# Static "); 828 if (_name != NULL) { 829 st->print("%s", _name); 830 int trap_req = uncommon_trap_request(); 831 if (trap_req != 0) { 832 char buf[100]; 833 st->print("(%s)", 834 Deoptimization::format_trap_request(buf, sizeof(buf), 835 trap_req)); 836 } 837 st->print(" "); 838 } 839 CallJavaNode::dump_spec(st); 840 } 841 #endif 842 843 //============================================================================= 844 uint CallDynamicJavaNode::size_of() const { return sizeof(*this); } 845 uint CallDynamicJavaNode::cmp( const Node &n ) const { 846 CallDynamicJavaNode &call = (CallDynamicJavaNode&)n; 847 return CallJavaNode::cmp(call); 848 } 849 #ifndef PRODUCT 850 void CallDynamicJavaNode::dump_spec(outputStream *st) const { 851 st->print("# Dynamic "); 852 CallJavaNode::dump_spec(st); 853 } 854 #endif 855 856 //============================================================================= 857 uint CallRuntimeNode::size_of() const { return sizeof(*this); } 858 uint CallRuntimeNode::cmp( const Node &n ) const { 859 CallRuntimeNode &call = (CallRuntimeNode&)n; 860 return CallNode::cmp(call) && !strcmp(_name,call._name); 861 } 862 #ifndef PRODUCT 863 void CallRuntimeNode::dump_spec(outputStream *st) const { 864 st->print("# "); 865 st->print(_name); 866 CallNode::dump_spec(st); 867 } 868 #endif 869 870 
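// Note: CallRuntimeNode (and CallLeafNode) arguments are laid out with the
// native C calling convention below, whereas CallNode/CallJavaNode arguments
// use the standard compiled-Java convention (see CallNode::calling_convention
// above).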
//------------------------------calling_convention----------------------------- 871 void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const { 872 Matcher::c_calling_convention( sig_bt, parm_regs, argcnt ); 873 } 874 875 //============================================================================= 876 //------------------------------calling_convention----------------------------- 877 878 879 //============================================================================= 880 #ifndef PRODUCT 881 void CallLeafNode::dump_spec(outputStream *st) const { 882 st->print("# "); 883 st->print(_name); 884 CallNode::dump_spec(st); 885 } 886 #endif 887 888 //============================================================================= 889 890 void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) { 891 assert(verify_jvms(jvms), "jvms must match"); 892 int loc = jvms->locoff() + idx; 893 if (in(loc)->is_top() && idx > 0 && !c->is_top() ) { 894 // If current local idx is top then local idx - 1 could 895 // be a long/double that needs to be killed since top could 896 // represent the 2nd half ofthe long/double. 897 uint ideal = in(loc -1)->ideal_reg(); 898 if (ideal == Op_RegD || ideal == Op_RegL) { 899 // set other (low index) half to top 900 set_req(loc - 1, in(loc)); 901 } 902 } 903 set_req(loc, c); 904 } 905 906 uint SafePointNode::size_of() const { return sizeof(*this); } 907 uint SafePointNode::cmp( const Node &n ) const { 908 return (&n == this); // Always fail except on self 909 } 910 911 //-------------------------set_next_exception---------------------------------- 912 void SafePointNode::set_next_exception(SafePointNode* n) { 913 assert(n == NULL || n->Opcode() == Op_SafePoint, "correct value for next_exception"); 914 if (len() == req()) { 915 if (n != NULL) add_prec(n); 916 } else { 917 set_prec(req(), n); 918 } 919 } 920 921 922 //----------------------------next_exception----------------------------------- 923 SafePointNode* SafePointNode::next_exception() const { 924 if (len() == req()) { 925 return NULL; 926 } else { 927 Node* n = in(req()); 928 assert(n == NULL || n->Opcode() == Op_SafePoint, "no other uses of prec edges"); 929 return (SafePointNode*) n; 930 } 931 } 932 933 934 //------------------------------Ideal------------------------------------------ 935 // Skip over any collapsed Regions 936 Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) { 937 return remove_dead_region(phase, can_reshape) ? 
this : NULL; 938 } 939 940 //------------------------------Identity--------------------------------------- 941 // Remove obviously duplicate safepoints 942 Node *SafePointNode::Identity( PhaseTransform *phase ) { 943 944 // If you have back to back safepoints, remove one 945 if( in(TypeFunc::Control)->is_SafePoint() ) 946 return in(TypeFunc::Control); 947 948 if( in(0)->is_Proj() ) { 949 Node *n0 = in(0)->in(0); 950 // Check if he is a call projection (except Leaf Call) 951 if( n0->is_Catch() ) { 952 n0 = n0->in(0)->in(0); 953 assert( n0->is_Call(), "expect a call here" ); 954 } 955 if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) { 956 // Useless Safepoint, so remove it 957 return in(TypeFunc::Control); 958 } 959 } 960 961 return this; 962 } 963 964 //------------------------------Value------------------------------------------ 965 const Type *SafePointNode::Value( PhaseTransform *phase ) const { 966 if( phase->type(in(0)) == Type::TOP ) return Type::TOP; 967 if( phase->eqv( in(0), this ) ) return Type::TOP; // Dead infinite loop 968 return Type::CONTROL; 969 } 970 971 #ifndef PRODUCT 972 void SafePointNode::dump_spec(outputStream *st) const { 973 st->print(" SafePoint "); 974 } 975 #endif 976 977 const RegMask &SafePointNode::in_RegMask(uint idx) const { 978 if( idx < TypeFunc::Parms ) return RegMask::Empty; 979 // Values outside the domain represent debug info 980 return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]); 981 } 982 const RegMask &SafePointNode::out_RegMask() const { 983 return RegMask::Empty; 984 } 985 986 987 void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) { 988 assert((int)grow_by > 0, "sanity"); 989 int monoff = jvms->monoff(); 990 int scloff = jvms->scloff(); 991 int endoff = jvms->endoff(); 992 assert(endoff == (int)req(), "no other states or debug info after me"); 993 Node* top = Compile::current()->top(); 994 for (uint i = 0; i < grow_by; i++) { 995 ins_req(monoff, top); 996 } 997 jvms->set_monoff(monoff + grow_by); 998 jvms->set_scloff(scloff + grow_by); 999 jvms->set_endoff(endoff + grow_by); 1000 } 1001 1002 void SafePointNode::push_monitor(const FastLockNode *lock) { 1003 // Add a LockNode, which points to both the original BoxLockNode (the 1004 // stack space for the monitor) and the Object being locked. 
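  //
  // The debug-info edges of a SafePointNode are laid out as
  //   [ locals | expression stack | monitors | scalarized objects ]
  // where the monitor section, req() indices [monoff, scloff), holds a
  // (box, obj) pair per monitor.  Pushing a monitor therefore appends
  // MonitorEdges == 2 edges and advances scloff and endoff accordingly.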
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  assert(req() == jvms()->endoff(), "correct sizing");
  int nextmon = jvms()->scloff();
  if (GenerateSynchronizationCode) {
    add_req(lock->box_node());
    add_req(lock->obj_node());
  } else {
    Node* top = Compile::current()->top();
    add_req(top);
    add_req(top);
  }
  jvms()->set_scloff(nextmon+MonitorEdges);
  jvms()->set_endoff(req());
}

void SafePointNode::pop_monitor() {
  // Delete last monitor from debug info
  debug_only(int num_before_pop = jvms()->nof_monitors());
  const int MonitorEdges = (1<<JVMState::logMonitorEdges);
  int scloff = jvms()->scloff();
  int endoff = jvms()->endoff();
  int new_scloff = scloff - MonitorEdges;
  int new_endoff = endoff - MonitorEdges;
  jvms()->set_scloff(new_scloff);
  jvms()->set_endoff(new_endoff);
  while (scloff > new_scloff) del_req(--scloff);
  assert(jvms()->nof_monitors() == num_before_pop-1, "");
}

Node *SafePointNode::peek_monitor_box() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_box(jvms(), mon);
}

Node *SafePointNode::peek_monitor_obj() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_obj(jvms(), mon);
}

// Do we Match on this edge index or not?  Match no edges
uint SafePointNode::match_edge(uint idx) const {
  if( !needs_polling_address_input() )
    return 0;

  return (TypeFunc::Parms == idx);
}

//============== SafePointScalarObjectNode ==============

SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                                                     AllocateNode* alloc,
#endif
                                                     uint first_index,
                                                     uint n_fields) :
  TypeNode(tp, 1), // 1 control input -- seems required.  Get from root.
#ifdef ASSERT
  _alloc(alloc),
#endif
  _first_index(first_index),
  _n_fields(n_fields)
{
  init_class_id(Class_SafePointScalarObject);
}

// Do not allow value-numbering for SafePointScalarObject node.
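// Each of these nodes describes one concrete scalar-replaced object in one
// safepoint's debug info (identified by _first_index into that safepoint), so
// two of them are never interchangeable even if their inputs look identical.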
1074 uint SafePointScalarObjectNode::hash() const { return NO_HASH; } 1075 uint SafePointScalarObjectNode::cmp( const Node &n ) const { 1076 return (&n == this); // Always fail except on self 1077 } 1078 1079 uint SafePointScalarObjectNode::ideal_reg() const { 1080 return 0; // No matching to machine instruction 1081 } 1082 1083 const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const { 1084 return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]); 1085 } 1086 1087 const RegMask &SafePointScalarObjectNode::out_RegMask() const { 1088 return RegMask::Empty; 1089 } 1090 1091 uint SafePointScalarObjectNode::match_edge(uint idx) const { 1092 return 0; 1093 } 1094 1095 SafePointScalarObjectNode* 1096 SafePointScalarObjectNode::clone(int jvms_adj, Dict* sosn_map) const { 1097 void* cached = (*sosn_map)[(void*)this]; 1098 if (cached != NULL) { 1099 return (SafePointScalarObjectNode*)cached; 1100 } 1101 SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone(); 1102 res->_first_index += jvms_adj; 1103 sosn_map->Insert((void*)this, (void*)res); 1104 return res; 1105 } 1106 1107 1108 #ifndef PRODUCT 1109 void SafePointScalarObjectNode::dump_spec(outputStream *st) const { 1110 st->print(" # fields@[%d..%d]", first_index(), 1111 first_index() + n_fields() - 1); 1112 } 1113 1114 #endif 1115 1116 //============================================================================= 1117 uint AllocateNode::size_of() const { return sizeof(*this); } 1118 1119 AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype, 1120 Node *ctrl, Node *mem, Node *abio, 1121 Node *size, Node *klass_node, Node *initial_test) 1122 : CallNode(atype, NULL, TypeRawPtr::BOTTOM) 1123 { 1124 init_class_id(Class_Allocate); 1125 init_flags(Flag_is_macro); 1126 _is_scalar_replaceable = false; 1127 Node *topnode = C->top(); 1128 1129 init_req( TypeFunc::Control , ctrl ); 1130 init_req( TypeFunc::I_O , abio ); 1131 init_req( TypeFunc::Memory , mem ); 1132 init_req( TypeFunc::ReturnAdr, topnode ); 1133 init_req( TypeFunc::FramePtr , topnode ); 1134 init_req( AllocSize , size); 1135 init_req( KlassNode , klass_node); 1136 init_req( InitialTest , initial_test); 1137 init_req( ALength , topnode); 1138 C->add_macro_node(this); 1139 } 1140 1141 //============================================================================= 1142 uint AllocateArrayNode::size_of() const { return sizeof(*this); } 1143 1144 Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) { 1145 if (remove_dead_region(phase, can_reshape)) return this; 1146 // Don't bother trying to transform a dead node 1147 if (in(0) && in(0)->is_top()) return NULL; 1148 1149 const Type* type = phase->type(Ideal_length()); 1150 if (type->isa_int() && type->is_int()->_hi < 0) { 1151 if (can_reshape) { 1152 PhaseIterGVN *igvn = phase->is_IterGVN(); 1153 // Unreachable fall through path (negative array length), 1154 // the allocation can only throw so disconnect it. 
1155 Node* proj = proj_out(TypeFunc::Control); 1156 Node* catchproj = NULL; 1157 if (proj != NULL) { 1158 for (DUIterator_Fast imax, i = proj->fast_outs(imax); i < imax; i++) { 1159 Node *cn = proj->fast_out(i); 1160 if (cn->is_Catch()) { 1161 catchproj = cn->as_Multi()->proj_out(CatchProjNode::fall_through_index); 1162 break; 1163 } 1164 } 1165 } 1166 if (catchproj != NULL && catchproj->outcnt() > 0 && 1167 (catchproj->outcnt() > 1 || 1168 catchproj->unique_out()->Opcode() != Op_Halt)) { 1169 assert(catchproj->is_CatchProj(), "must be a CatchProjNode"); 1170 Node* nproj = catchproj->clone(); 1171 igvn->register_new_node_with_optimizer(nproj); 1172 1173 Node *frame = new (phase->C, 1) ParmNode( phase->C->start(), TypeFunc::FramePtr ); 1174 frame = phase->transform(frame); 1175 // Halt & Catch Fire 1176 Node *halt = new (phase->C, TypeFunc::Parms) HaltNode( nproj, frame ); 1177 phase->C->root()->add_req(halt); 1178 phase->transform(halt); 1179 1180 igvn->replace_node(catchproj, phase->C->top()); 1181 return this; 1182 } 1183 } else { 1184 // Can't correct it during regular GVN so register for IGVN 1185 phase->C->record_for_igvn(this); 1186 } 1187 } 1188 return NULL; 1189 } 1190 1191 // Retrieve the length from the AllocateArrayNode. Narrow the type with a 1192 // CastII, if appropriate. If we are not allowed to create new nodes, and 1193 // a CastII is appropriate, return NULL. 1194 Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTransform *phase, bool allow_new_nodes) { 1195 Node *length = in(AllocateNode::ALength); 1196 assert(length != NULL, "length is not null"); 1197 1198 const TypeInt* length_type = phase->find_int_type(length); 1199 const TypeAryPtr* ary_type = oop_type->isa_aryptr(); 1200 1201 if (ary_type != NULL && length_type != NULL) { 1202 const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type); 1203 if (narrow_length_type != length_type) { 1204 // Assert one of: 1205 // - the narrow_length is 0 1206 // - the narrow_length is not wider than length 1207 assert(narrow_length_type == TypeInt::ZERO || 1208 (narrow_length_type->_hi <= length_type->_hi && 1209 narrow_length_type->_lo >= length_type->_lo), 1210 "narrow type must be narrower than length type"); 1211 1212 // Return NULL if new nodes are not allowed 1213 if (!allow_new_nodes) return NULL; 1214 // Create a cast which is control dependent on the initialization to 1215 // propagate the fact that the array length must be positive. 1216 length = new (phase->C, 2) CastIINode(length, narrow_length_type); 1217 length->set_req(0, initialization()->proj_out(0)); 1218 } 1219 } 1220 1221 return length; 1222 } 1223 1224 //============================================================================= 1225 uint LockNode::size_of() const { return sizeof(*this); } 1226 1227 // Redundant lock elimination 1228 // 1229 // There are various patterns of locking where we release and 1230 // immediately reacquire a lock in a piece of code where no operations 1231 // occur in between that would be observable. In those cases we can 1232 // skip releasing and reacquiring the lock without violating any 1233 // fairness requirements. Doing this around a loop could cause a lock 1234 // to be held for a very long time so we concentrate on non-looping 1235 // control flow. We also require that the operations are fully 1236 // redundant meaning that we don't introduce new lock operations on 1237 // some paths so to be able to eliminate it on others ala PRE. 
// This would probably require some more extensive graph manipulation to
// guarantee that the memory edges were all handled correctly.
//
// Assuming p is a simple predicate which can't trap in any way and s
// is a synchronized method consider this code:
//
//   s();
//   if (p)
//     s();
//   else
//     s();
//   s();
//
// 1. The unlocks of the first call to s can be eliminated if the
// locks inside the then and else branches are eliminated.
//
// 2. The unlocks of the then and else branches can be eliminated if
// the lock of the final call to s is eliminated.
//
// Either of these cases subsumes the simple case of sequential control flow.
//
// Additionally we can eliminate versions without the else case:
//
//   s();
//   if (p)
//     s();
//   s();
//
// 3. In this case we eliminate the unlock of the first s, the lock
// and unlock in the then case and the lock in the final s.
//
// Note also that in all these cases the then/else pieces don't have
// to be trivial as long as they begin and end with synchronization
// operations.
//
//   s();
//   if (p)
//     s();
//   f();
//   s();
//   s();
//
// The code will work properly for this case, leaving in the unlock
// before the call to f and the relock after it.
//
// A potentially interesting case which isn't handled here is when the
// locking is partially redundant.
//
//   s();
//   if (p)
//     s();
//
// This could be eliminated by putting unlocking on the else case and
// eliminating the first unlock and the lock in the then side.
// Alternatively the unlock could be moved out of the then side so it
// was after the merge and the first unlock and second lock
// eliminated.  This might require less manipulation of the memory
// state to get correct.
//
// Additionally we might allow work between an unlock and a lock before
// giving up eliminating the locks.  The current code disallows any
// conditional control flow between these operations.  A formulation
// similar to partial redundancy elimination computing the
// availability of unlocking and the anticipatability of locking at a
// program point would allow detection of fully redundant locking with
// some amount of work in between.  I'm not sure how often I really
// think that would occur though.  Most of the cases I've seen
// indicate it's likely non-trivial work would occur in between.
// There may be other more complicated constructs where we could
// eliminate locking but I haven't seen any others appear as hot or
// interesting.
//
// Locking and unlocking have a canonical form in ideal that looks
// roughly like this:
//
//              <obj>
//              |  \\------+
//              |   \       \
//              |    BoxLock  \
//              |    |  |      \
//              |    |   \      \
//              |    |    FastLock
//              |    |   /
//              |    |  /
//              |    |  |
//
//              Lock
//              |
//              Proj #0
//              |
//              MembarAcquire
//              |
//              Proj #0
//
//              MembarRelease
//              |
//              Proj #0
//              |
//              Unlock
//              |
//              Proj #0
//
//
// This code proceeds by processing Lock nodes during PhaseIterGVN
// and searching back through its control for the proper code
// patterns.
Once it finds a set of lock and unlock operations to 1344 // eliminate they are marked as eliminatable which causes the 1345 // expansion of the Lock and Unlock macro nodes to make the operation a NOP 1346 // 1347 //============================================================================= 1348 1349 // 1350 // Utility function to skip over uninteresting control nodes. Nodes skipped are: 1351 // - copy regions. (These may not have been optimized away yet.) 1352 // - eliminated locking nodes 1353 // 1354 static Node *next_control(Node *ctrl) { 1355 if (ctrl == NULL) 1356 return NULL; 1357 while (1) { 1358 if (ctrl->is_Region()) { 1359 RegionNode *r = ctrl->as_Region(); 1360 Node *n = r->is_copy(); 1361 if (n == NULL) 1362 break; // hit a region, return it 1363 else 1364 ctrl = n; 1365 } else if (ctrl->is_Proj()) { 1366 Node *in0 = ctrl->in(0); 1367 if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) { 1368 ctrl = in0->in(0); 1369 } else { 1370 break; 1371 } 1372 } else { 1373 break; // found an interesting control 1374 } 1375 } 1376 return ctrl; 1377 } 1378 // 1379 // Given a control, see if it's the control projection of an Unlock which 1380 // operating on the same object as lock. 1381 // 1382 bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock, 1383 GrowableArray<AbstractLockNode*> &lock_ops) { 1384 ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : NULL; 1385 if (ctrl_proj != NULL && ctrl_proj->_con == TypeFunc::Control) { 1386 Node *n = ctrl_proj->in(0); 1387 if (n != NULL && n->is_Unlock()) { 1388 UnlockNode *unlock = n->as_Unlock(); 1389 if (lock->obj_node()->eqv_uncast(unlock->obj_node()) && 1390 BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) && 1391 !unlock->is_eliminated()) { 1392 lock_ops.append(unlock); 1393 return true; 1394 } 1395 } 1396 } 1397 return false; 1398 } 1399 1400 // 1401 // Find the lock matching an unlock. Returns null if a safepoint 1402 // or complicated control is encountered first. 1403 LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) { 1404 LockNode *lock_result = NULL; 1405 // find the matching lock, or an intervening safepoint 1406 Node *ctrl = next_control(unlock->in(0)); 1407 while (1) { 1408 assert(ctrl != NULL, "invalid control graph"); 1409 assert(!ctrl->is_Start(), "missing lock for unlock"); 1410 if (ctrl->is_top()) break; // dead control path 1411 if (ctrl->is_Proj()) ctrl = ctrl->in(0); 1412 if (ctrl->is_SafePoint()) { 1413 break; // found a safepoint (may be the lock we are searching for) 1414 } else if (ctrl->is_Region()) { 1415 // Check for a simple diamond pattern. Punt on anything more complicated 1416 if (ctrl->req() == 3 && ctrl->in(1) != NULL && ctrl->in(2) != NULL) { 1417 Node *in1 = next_control(ctrl->in(1)); 1418 Node *in2 = next_control(ctrl->in(2)); 1419 if (((in1->is_IfTrue() && in2->is_IfFalse()) || 1420 (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) { 1421 ctrl = next_control(in1->in(0)->in(0)); 1422 } else { 1423 break; 1424 } 1425 } else { 1426 break; 1427 } 1428 } else { 1429 ctrl = next_control(ctrl->in(0)); // keep searching 1430 } 1431 } 1432 if (ctrl->is_Lock()) { 1433 LockNode *lock = ctrl->as_Lock(); 1434 if (lock->obj_node()->eqv_uncast(unlock->obj_node()) && 1435 BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) { 1436 lock_result = lock; 1437 } 1438 } 1439 return lock_result; 1440 } 1441 1442 // This code corresponds to case 3 above. 
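// That is: starting from one projection of an If, look above the If for an
// Unlock of the same object (and box slot), and on the opposite projection
// for another Lock of it; when both are found they are recorded in lock_ops
// as candidates for elimination.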
1443 1444 bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock, 1445 GrowableArray<AbstractLockNode*> &lock_ops) { 1446 Node* if_node = node->in(0); 1447 bool if_true = node->is_IfTrue(); 1448 1449 if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) { 1450 Node *lock_ctrl = next_control(if_node->in(0)); 1451 if (find_matching_unlock(lock_ctrl, lock, lock_ops)) { 1452 Node* lock1_node = NULL; 1453 ProjNode* proj = if_node->as_If()->proj_out(!if_true); 1454 if (if_true) { 1455 if (proj->is_IfFalse() && proj->outcnt() == 1) { 1456 lock1_node = proj->unique_out(); 1457 } 1458 } else { 1459 if (proj->is_IfTrue() && proj->outcnt() == 1) { 1460 lock1_node = proj->unique_out(); 1461 } 1462 } 1463 if (lock1_node != NULL && lock1_node->is_Lock()) { 1464 LockNode *lock1 = lock1_node->as_Lock(); 1465 if (lock->obj_node()->eqv_uncast(lock1->obj_node()) && 1466 BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) && 1467 !lock1->is_eliminated()) { 1468 lock_ops.append(lock1); 1469 return true; 1470 } 1471 } 1472 } 1473 } 1474 1475 lock_ops.trunc_to(0); 1476 return false; 1477 } 1478 1479 bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock, 1480 GrowableArray<AbstractLockNode*> &lock_ops) { 1481 // check each control merging at this point for a matching unlock. 1482 // in(0) should be self edge so skip it. 1483 for (int i = 1; i < (int)region->req(); i++) { 1484 Node *in_node = next_control(region->in(i)); 1485 if (in_node != NULL) { 1486 if (find_matching_unlock(in_node, lock, lock_ops)) { 1487 // found a match so keep on checking. 1488 continue; 1489 } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) { 1490 continue; 1491 } 1492 1493 // If we fall through to here then it was some kind of node we 1494 // don't understand or there wasn't a matching unlock, so give 1495 // up trying to merge locks. 1496 lock_ops.trunc_to(0); 1497 return false; 1498 } 1499 } 1500 return true; 1501 1502 } 1503 1504 #ifndef PRODUCT 1505 // 1506 // Create a counter which counts the number of times this lock is acquired 1507 // 1508 void AbstractLockNode::create_lock_counter(JVMState* state) { 1509 _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter); 1510 } 1511 1512 void AbstractLockNode::set_eliminated_lock_counter() { 1513 if (_counter) { 1514 // Update the counter to indicate that this lock was eliminated. 1515 // The counter update code will stay around even though the 1516 // optimizer will eliminate the lock operation itself. 1517 _counter->set_tag(NamedCounter::EliminatedLockCounter); 1518 } 1519 } 1520 #endif 1521 1522 //============================================================================= 1523 Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) { 1524 1525 // perform any generic optimizations first (returns 'this' or NULL) 1526 Node *result = SafePointNode::Ideal(phase, can_reshape); 1527 if (result != NULL) return result; 1528 // Don't bother trying to transform a dead node 1529 if (in(0) && in(0)->is_top()) return NULL; 1530 1531 // Now see if we can optimize away this lock. We don't actually 1532 // remove the locking here, we simply set the _eliminate flag which 1533 // prevents macro expansion from expanding the lock. Since we don't 1534 // modify the graph, the value returned from this function is the 1535 // one computed above. 
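  //
  // Two opportunities are checked below: (1) if escape analysis shows the
  // locked object does not globally escape, the lock is marked non-escaping
  // so macro expansion will drop it; (2) otherwise lock coarsening searches
  // the control flow above this lock for the matching unlock patterns
  // described in the large comment above.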
1536 if (can_reshape && EliminateLocks && !is_non_esc_obj()) { 1537 // 1538 // If we are locking an unescaped object, the lock/unlock is unnecessary 1539 // 1540 ConnectionGraph *cgr = phase->C->congraph(); 1541 if (cgr != NULL && cgr->not_global_escape(obj_node())) { 1542 assert(!is_eliminated() || is_coarsened(), "sanity"); 1543 // The lock could be marked eliminated by lock coarsening 1544 // code during first IGVN before EA. Replace coarsened flag 1545 // to eliminate all associated locks/unlocks. 1546 this->set_non_esc_obj(); 1547 return result; 1548 } 1549 1550 // 1551 // Try lock coarsening 1552 // 1553 PhaseIterGVN* iter = phase->is_IterGVN(); 1554 if (iter != NULL && !is_eliminated()) { 1555 1556 GrowableArray<AbstractLockNode*> lock_ops; 1557 1558 Node *ctrl = next_control(in(0)); 1559 1560 // now search back for a matching Unlock 1561 if (find_matching_unlock(ctrl, this, lock_ops)) { 1562 // found an unlock directly preceding this lock. This is the 1563 // case of single unlock directly control dependent on a 1564 // single lock which is the trivial version of case 1 or 2. 1565 } else if (ctrl->is_Region() ) { 1566 if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) { 1567 // found lock preceded by multiple unlocks along all paths 1568 // joining at this point which is case 3 in description above. 1569 } 1570 } else { 1571 // see if this lock comes from either half of an if and the 1572 // predecessors merges unlocks and the other half of the if 1573 // performs a lock. 1574 if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) { 1575 // found unlock splitting to an if with locks on both branches. 1576 } 1577 } 1578 1579 if (lock_ops.length() > 0) { 1580 // add ourselves to the list of locks to be eliminated. 1581 lock_ops.append(this); 1582 1583 #ifndef PRODUCT 1584 if (PrintEliminateLocks) { 1585 int locks = 0; 1586 int unlocks = 0; 1587 for (int i = 0; i < lock_ops.length(); i++) { 1588 AbstractLockNode* lock = lock_ops.at(i); 1589 if (lock->Opcode() == Op_Lock) 1590 locks++; 1591 else 1592 unlocks++; 1593 if (Verbose) { 1594 lock->dump(1); 1595 } 1596 } 1597 tty->print_cr("***Eliminated %d unlocks and %d locks", unlocks, locks); 1598 } 1599 #endif 1600 1601 // for each of the identified locks, mark them 1602 // as eliminatable 1603 for (int i = 0; i < lock_ops.length(); i++) { 1604 AbstractLockNode* lock = lock_ops.at(i); 1605 1606 // Mark it eliminated by coarsening and update any counters 1607 lock->set_coarsened(); 1608 } 1609 } else if (ctrl->is_Region() && 1610 iter->_worklist.member(ctrl)) { 1611 // We weren't able to find any opportunities but the region this 1612 // lock is control dependent on hasn't been processed yet so put 1613 // this lock back on the worklist so we can check again once any 1614 // region simplification has occurred. 1615 iter->_worklist.push(this); 1616 } 1617 } 1618 } 1619 1620 return result; 1621 } 1622 1623 //============================================================================= 1624 bool LockNode::is_nested_lock_region() { 1625 BoxLockNode* box = box_node()->as_BoxLock(); 1626 int stk_slot = box->stack_slot(); 1627 if (stk_slot <= 0) 1628 return false; // External lock or it is not Box (Phi node). 1629 1630 // Ignore complex cases: merged locks or multiple locks. 1631 Node* obj = obj_node(); 1632 LockNode* unique_lock = NULL; 1633 if (!box->is_simple_lock_region(&unique_lock, obj) || 1634 (unique_lock != this)) { 1635 return false; 1636 } 1637 1638 // Look for external lock for the same object. 
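  //
  // Walk every JVMState in this lock's debug-info chain and scan its monitor
  // (box, obj) pairs: if some enclosing scope already holds a lock on the
  // same object in an older (lower-numbered) stack slot, this lock is nested
  // inside that one.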
  SafePointNode* sfn = this->as_SafePoint();
  JVMState* youngest_jvms = sfn->jvms();
  int max_depth = youngest_jvms->depth();
  for (int depth = 1; depth <= max_depth; depth++) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    int num_mon = jvms->nof_monitors();
    // Loop over monitors
    for (int idx = 0; idx < num_mon; idx++) {
      Node* obj_node = sfn->monitor_obj(jvms, idx);
      BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock();
      if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
        return true;
      }
    }
  }
  return false;
}

//=============================================================================
uint UnlockNode::size_of() const { return sizeof(*this); }

//=============================================================================
Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or NULL)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != NULL)  return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return NULL;

  // Now see if we can optimize away this unlock.  We don't actually
  // remove the unlocking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the unlock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  // Escape state is defined after Parse phase.
  if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
    //
    // If we are unlocking an unescaped object, the lock/unlock is unnecessary.
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != NULL && cgr->not_global_escape(obj_node())) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA. Replace coarsened flag
      // to eliminate all associated locks/unlocks.
      this->set_non_esc_obj();
    }
  }
  return result;
}