/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/interpreter.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/escape.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/parse.hpp"
#include "opto/regalloc.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/valuetypenode.hpp"
#include "runtime/sharedRuntime.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

//=============================================================================
uint StartNode::size_of() const { return sizeof(*this); }
uint StartNode::cmp( const Node &n ) const
{ return _domain == ((StartNode&)n)._domain; }
const Type *StartNode::bottom_type() const { return _domain; }
const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; }
#ifndef PRODUCT
void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
#endif

//------------------------------Ideal------------------------------------------
Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}
//------------------------------calling_convention-----------------------------
void StartNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  Matcher::calling_convention( sig_bt, parm_regs, argcnt, false );
}

//------------------------------Registers--------------------------------------
const RegMask &StartNode::in_RegMask(uint) const {
  return RegMask::Empty;
}

//------------------------------match------------------------------------------
// Construct projections for incoming parameters, and their RegMask info
Node *StartNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  case TypeFunc::FramePtr:
    return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  case TypeFunc::ReturnAdr:
    return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  case TypeFunc::Parms:
  default: {
      uint parm_num = proj->_con - TypeFunc::Parms;
      const Type *t = _domain->field_at(proj->_con);
      if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
        return new ConNode(Type::TOP);
      uint ideal_reg = t->ideal_reg();
      RegMask &rm = match->_calling_convention_mask[parm_num];
      return new MachProjNode(this,proj->_con,rm,ideal_reg);
    }
  }
  return NULL;
}

//------------------------------StartOSRNode----------------------------------
// The method start node for an on stack replacement adapter

//------------------------------osr_domain-----------------------------
const TypeTuple *StartOSRNode::osr_domain() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;  // address of osr buffer

  return TypeTuple::make(TypeFunc::Parms+1, fields);
}

//=============================================================================
const char * const ParmNode::names[TypeFunc::Parms+1] = {
  "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
};

#ifndef PRODUCT
void ParmNode::dump_spec(outputStream *st) const {
  if( _con < TypeFunc::Parms ) {
    st->print("%s", names[_con]);
  } else {
    st->print("Parm%d: ",_con-TypeFunc::Parms);
    // Verbose and WizardMode dump bottom_type for all nodes
    if( !Verbose && !WizardMode )   bottom_type()->dump_on(st);
  }
}

void ParmNode::dump_compact_spec(outputStream *st) const {
  if (_con < TypeFunc::Parms) {
    st->print("%s", names[_con]);
  } else {
    st->print("%d:", _con-TypeFunc::Parms);
    // unconditionally dump bottom_type
    bottom_type()->dump_on(st);
  }
}

// For a ParmNode, all immediate inputs and outputs are considered relevant
// both in compact and standard representation.
void ParmNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const {
  this->collect_nodes(in_rel, 1, false, false);
  this->collect_nodes(out_rel, -1, false, false);
}
#endif

uint ParmNode::ideal_reg() const {
  switch( _con ) {
  case TypeFunc::Control  : // fall through
  case TypeFunc::I_O      : // fall through
  case TypeFunc::Memory   : return 0;
  case TypeFunc::FramePtr : // fall through
  case TypeFunc::ReturnAdr: return Op_RegP;
  default                 : assert( _con > TypeFunc::Parms, "" );
    // fall through
  case TypeFunc::Parms    : {
    // Type of argument being passed
    const Type *t = in(0)->as_Start()->_domain->field_at(_con);
    return t->ideal_reg();
  }
  }
  ShouldNotReachHere();
  return 0;
}

//=============================================================================
ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) {
  init_req(TypeFunc::Control,cntrl);
  init_req(TypeFunc::I_O,i_o);
  init_req(TypeFunc::Memory,memory);
  init_req(TypeFunc::FramePtr,frameptr);
  init_req(TypeFunc::ReturnAdr,retadr);
}

Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

const Type* ReturnNode::Value(PhaseGVN* phase) const {
  return ( phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

// Do we Match on this edge index or not?  No edges on return nodes
uint ReturnNode::match_edge(uint idx) const {
  return 0;
}


#ifndef PRODUCT
void ReturnNode::dump_req(outputStream *st) const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("returns");
    if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    else st->print("_ ");
  }
}
#endif

//=============================================================================
RethrowNode::RethrowNode(
  Node* cntrl,
  Node* i_o,
  Node* memory,
  Node* frameptr,
  Node* ret_adr,
  Node* exception
) : Node(TypeFunc::Parms + 1) {
  init_req(TypeFunc::Control  , cntrl    );
  init_req(TypeFunc::I_O      , i_o      );
  init_req(TypeFunc::Memory   , memory   );
  init_req(TypeFunc::FramePtr , frameptr );
  init_req(TypeFunc::ReturnAdr, ret_adr);
  init_req(TypeFunc::Parms    , exception);
}

Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

const Type* RethrowNode::Value(PhaseGVN* phase) const {
  return (phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

uint RethrowNode::match_edge(uint idx) const {
  return 0;
}

#ifndef PRODUCT
void RethrowNode::dump_req(outputStream *st) const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("exception");
    if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    else st->print("_ ");
  }
}
#endif
//=============================================================================
// Do we Match on this edge index or not?  Match only target address & method
uint TailCallNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx  &&  idx <= TypeFunc::Parms+1;
}

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & oop
uint TailJumpNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx  &&  idx <= TypeFunc::Parms+1;
}

//=============================================================================
JVMState::JVMState(ciMethod* method, JVMState* caller) :
  _method(method) {
  assert(method != NULL, "must be valid call site");
  _bci = InvocationEntryBci;
  _reexecute = Reexecute_Undefined;
  debug_only(_bci = -99);  // random garbage value
  debug_only(_map = (SafePointNode*)-1);
  _caller = caller;
  _depth  = 1 + (caller == NULL ? 0 : caller->depth());
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff + _method->max_locals();
  _monoff = _stkoff + _method->max_stack();
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}
JVMState::JVMState(int stack_size) :
  _method(NULL) {
  _bci = InvocationEntryBci;
  _reexecute = Reexecute_Undefined;
  debug_only(_map = (SafePointNode*)-1);
  _caller = NULL;
  _depth  = 1;
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff;
  _monoff = _stkoff + stack_size;
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}

//--------------------------------of_depth-------------------------------------
JVMState* JVMState::of_depth(int d) const {
  const JVMState* jvmp = this;
  assert(0 < d && (uint)d <= depth(), "oob");
  for (int skip = depth() - d; skip > 0; skip--) {
    jvmp = jvmp->caller();
  }
  assert(jvmp->depth() == (uint)d, "found the right one");
  return (JVMState*)jvmp;
}

//-----------------------------same_calls_as-----------------------------------
bool JVMState::same_calls_as(const JVMState* that) const {
  if (this == that)                    return true;
  if (this->depth() != that->depth())  return false;
  const JVMState* p = this;
  const JVMState* q = that;
  for (;;) {
    if (p->_method != q->_method)    return false;
    if (p->_method == NULL)          return true;   // bci is irrelevant
    if (p->_bci != q->_bci)          return false;
    if (p->_reexecute != q->_reexecute)  return false;
    p = p->caller();
    q = q->caller();
    if (p == q)                      return true;
    assert(p != NULL && q != NULL, "depth check ensures we don't run off end");
  }
}

//------------------------------debug_start------------------------------------
uint JVMState::debug_start()  const {
  debug_only(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last");
  return of_depth(1)->locoff();
}

//-------------------------------debug_end-------------------------------------
uint JVMState::debug_end() const {
  debug_only(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last");
  return endoff();
}

//------------------------------debug_depth------------------------------------
uint JVMState::debug_depth() const {
  uint total = 0;
  for (const JVMState* jvmp = this; jvmp != NULL; jvmp = jvmp->caller()) {
    total += jvmp->debug_size();
  }
  return total;
}
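
// A sketch of the layout implied by the constructors above (illustrative
// sizes, not from the original source): the offsets partition the owning
// node's debug-info inputs into consecutive regions.  For a method with
// max_locals() == 2 and max_stack() == 3 the first constructor yields
//
//   _locoff = TypeFunc::Parms   // locals occupy [_locoff, _stkoff)
//   _stkoff = _locoff + 2       // expression stack occupies [_stkoff, _monoff)
//   _monoff = _stkoff + 3       // monitors occupy [_monoff, _scloff)
//   _scloff = _monoff           // scalarized objects occupy [_scloff, _endoff)
//
// Monitors and scalarized objects start out empty; push_monitor() and
// grow_stack() below move the region boundaries as state is added.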

#ifndef PRODUCT

//------------------------------format_helper----------------------------------
// Given an allocation (a Chaitin object) and a Node decide if the Node carries
// any defined value or not.  If it does, print out the register or constant.
static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) {
  if (n == NULL) { st->print(" NULL"); return; }
  if (n->is_SafePointScalarObject()) {
    // Scalar replacement.
    SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject();
    scobjs->append_if_missing(spobj);
    int sco_n = scobjs->find(spobj);
    assert(sco_n >= 0, "");
    st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n);
    return;
  }
  if (regalloc->node_regs_max_index() > 0 &&
      OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
    char buf[50];
    regalloc->dump_register(n,buf);
    st->print(" %s%d]=%s",msg,i,buf);
  } else {                      // No register, but might be constant
    const Type *t = n->bottom_type();
    switch (t->base()) {
    case Type::Int:
      st->print(" %s%d]=#" INT32_FORMAT,msg,i,t->is_int()->get_con());
      break;
    case Type::AnyPtr:
      assert( t == TypePtr::NULL_PTR || n->in_dump(), "" );
      st->print(" %s%d]=#NULL",msg,i);
      break;
    case Type::AryPtr:
    case Type::InstPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->isa_oopptr()->const_oop()));
      break;
    case Type::KlassPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_klassptr()->klass()));
      break;
    case Type::MetadataPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_metadataptr()->metadata()));
      break;
    case Type::NarrowOop:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_oopptr()->const_oop()));
      break;
    case Type::RawPtr:
      st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,p2i(t->is_rawptr()));
      break;
    case Type::DoubleCon:
      st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d);
      break;
    case Type::FloatCon:
      st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f);
      break;
    case Type::Long:
      st->print(" %s%d]=#" INT64_FORMAT,msg,i,(int64_t)(t->is_long()->get_con()));
      break;
    case Type::Half:
    case Type::Top:
      st->print(" %s%d]=_",msg,i);
      break;
    default: ShouldNotReachHere();
    }
  }
}

//------------------------------format-----------------------------------------
void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const {
  st->print(" #");
  if (_method) {
    _method->print_short_name(st);
    st->print(" @ bci:%d ",_bci);
  } else {
    st->print_cr(" runtime stub ");
    return;
  }
  if (n->is_MachSafePoint()) {
    GrowableArray<SafePointScalarObjectNode*> scobjs;
    MachSafePointNode *mcall = n->as_MachSafePoint();
    uint i;
    // Print locals
    for (i = 0; i < (uint)loc_size(); i++)
      format_helper(regalloc, st, mcall->local(this, i), "L[", i, &scobjs);
    // Print stack
    for (i = 0; i < (uint)stk_size(); i++) {
      if ((uint)(_stkoff + i) >= mcall->len())
        st->print(" oob ");
      else
        format_helper(regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs);
    }
    for (i = 0; (int)i < nof_monitors(); i++) {
      Node *box = mcall->monitor_box(this, i);
      Node *obj = mcall->monitor_obj(this, i);
      if (regalloc->node_regs_max_index() > 0 &&
          OptoReg::is_valid(regalloc->get_reg_first(box))) {
        box = BoxLockNode::box_node(box);
        format_helper(regalloc, st, box, "MON-BOX[", i, &scobjs);
      } else {
        OptoReg::Name box_reg = BoxLockNode::reg(box);
        st->print(" MON-BOX%d=%s+%d",
                  i,
                  OptoReg::regname(OptoReg::c_frame_pointer),
                  regalloc->reg2offset(box_reg));
      }
      const char* obj_msg = "MON-OBJ[";
      if (EliminateLocks) {
        if (BoxLockNode::box_node(box)->is_eliminated())
          obj_msg = "MON-OBJ(LOCK ELIMINATED)[";
      }
      format_helper(regalloc, st, obj, obj_msg, i, &scobjs);
    }

    for (i = 0; i < (uint)scobjs.length(); i++) {
      // Scalar replaced objects.
      st->cr();
      st->print(" # ScObj" INT32_FORMAT " ", i);
      SafePointScalarObjectNode* spobj = scobjs.at(i);
      ciKlass* cik = spobj->bottom_type()->is_oopptr()->klass();
      assert(cik->is_instance_klass() ||
             cik->is_array_klass(), "Not supported allocation.");
      ciInstanceKlass *iklass = NULL;
      if (cik->is_instance_klass()) {
        cik->print_name_on(st);
        iklass = cik->as_instance_klass();
      } else if (cik->is_type_array_klass()) {
        cik->as_array_klass()->base_element_type()->print_name_on(st);
        st->print("[%d]", spobj->n_fields());
      } else if (cik->is_obj_array_klass()) {
        ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
        if (cie->is_instance_klass()) {
          cie->print_name_on(st);
        } else if (cie->is_type_array_klass()) {
          cie->as_array_klass()->base_element_type()->print_name_on(st);
        } else {
          ShouldNotReachHere();
        }
        st->print("[%d]", spobj->n_fields());
        int ndim = cik->as_array_klass()->dimension() - 1;
        while (ndim-- > 0) {
          st->print("[]");
        }
      } else if (cik->is_value_array_klass()) {
        ciKlass* cie = cik->as_value_array_klass()->base_element_klass();
        cie->print_name_on(st);
        st->print("[%d]", spobj->n_fields());
        int ndim = cik->as_array_klass()->dimension() - 1;
        while (ndim-- > 0) {
          st->print("[]");
        }
      }
      st->print("={");
      uint nf = spobj->n_fields();
      if (nf > 0) {
        uint first_ind = spobj->first_index(mcall->jvms());
        Node* fld_node = mcall->in(first_ind);
        ciField* cifield;
        if (iklass != NULL) {
          st->print(" [");
          cifield = iklass->nonstatic_field_at(0);
          cifield->print_name_on(st);
          format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
        } else {
          format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
        }
        for (uint j = 1; j < nf; j++) {
          fld_node = mcall->in(first_ind+j);
          if (iklass != NULL) {
            st->print(", [");
            cifield = iklass->nonstatic_field_at(j);
            cifield->print_name_on(st);
            format_helper(regalloc, st, fld_node, ":", j, &scobjs);
          } else {
            format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
          }
        }
      }
      st->print(" }");
    }
  }
  st->cr();
  if (caller() != NULL) caller()->format(regalloc, n, st);
}


void JVMState::dump_spec(outputStream *st) const {
  if (_method != NULL) {
    bool printed = false;
    if (!Verbose) {
      // The JVMS dumps make really, really long lines.
      // Take out the most boring parts, which are the package prefixes.
      char buf[500];
      stringStream namest(buf, sizeof(buf));
      _method->print_short_name(&namest);
      if (namest.count() < sizeof(buf)) {
        const char* name = namest.base();
        if (name[0] == ' ')  ++name;
        const char* endcn = strchr(name, ':');  // end of class name
        if (endcn == NULL)  endcn = strchr(name, '(');
        if (endcn == NULL)  endcn = name + strlen(name);
        while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/')
          --endcn;
        st->print(" %s", endcn);
        printed = true;
      }
    }
    if (!printed)
      _method->print_short_name(st);
    st->print(" @ bci:%d",_bci);
    if(_reexecute == Reexecute_True)
      st->print(" reexecute");
  } else {
    st->print(" runtime stub");
  }
  if (caller() != NULL)  caller()->dump_spec(st);
}


void JVMState::dump_on(outputStream* st) const {
  bool print_map = _map && !((uintptr_t)_map & 1) &&
                   ((caller() == NULL) || (caller()->map() != _map));
  if (print_map) {
    if (_map->len() > _map->req()) {  // _map->has_exceptions()
      Node* ex = _map->in(_map->req());  // _map->next_exception()
      // skip the first one; it's already being printed
      while (ex != NULL && ex->len() > ex->req()) {
        ex = ex->in(ex->req());  // ex->next_exception()
        ex->dump(1);
      }
    }
    _map->dump(Verbose ? 2 : 1);
  }
  if (caller() != NULL) {
    caller()->dump_on(st);
  }
  st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=",
            depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false");
  if (_method == NULL) {
    st->print_cr("(none)");
  } else {
    _method->print_name(st);
    st->cr();
    if (bci() >= 0 && bci() < _method->code_size()) {
      st->print(" bc: ");
      _method->print_codes_on(bci(), bci()+1, st);
    }
  }
}

// Extra way to dump a jvms from the debugger,
// to avoid a bug with C++ member function calls.
void dump_jvms(JVMState* jvms) {
  jvms->dump();
}
#endif

//--------------------------clone_shallow--------------------------------------
JVMState* JVMState::clone_shallow(Compile* C) const {
  JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0);
  n->set_bci(_bci);
  n->_reexecute = _reexecute;
  n->set_locoff(_locoff);
  n->set_stkoff(_stkoff);
  n->set_monoff(_monoff);
  n->set_scloff(_scloff);
  n->set_endoff(_endoff);
  n->set_sp(_sp);
  n->set_map(_map);
  return n;
}

//---------------------------clone_deep----------------------------------------
JVMState* JVMState::clone_deep(Compile* C) const {
  JVMState* n = clone_shallow(C);
  for (JVMState* p = n; p->_caller != NULL; p = p->_caller) {
    p->_caller = p->_caller->clone_shallow(C);
  }
  assert(n->depth() == depth(), "sanity");
  assert(n->debug_depth() == debug_depth(), "sanity");
  return n;
}

/**
 * Reset map for all callers
 */
void JVMState::set_map_deep(SafePointNode* map) {
  for (JVMState* p = this; p->_caller != NULL; p = p->_caller) {
    p->set_map(map);
  }
}

// Adapt offsets in the in-array after adding or removing an edge.
// Prerequisite is that the JVMState is used by only one node.
void JVMState::adapt_position(int delta) {
  for (JVMState* jvms = this; jvms != NULL; jvms = jvms->caller()) {
    jvms->set_locoff(jvms->locoff() + delta);
    jvms->set_stkoff(jvms->stkoff() + delta);
    jvms->set_monoff(jvms->monoff() + delta);
    jvms->set_scloff(jvms->scloff() + delta);
    jvms->set_endoff(jvms->endoff() + delta);
  }
}

// Mirror the stack size calculation in the deopt code
// How much stack space would we need at this point in the program in
// case of deoptimization?
int JVMState::interpreter_frame_size() const {
  const JVMState* jvms = this;
  int size = 0;
  int callee_parameters = 0;
  int callee_locals = 0;
  int extra_args = method()->max_stack() - stk_size();

  while (jvms != NULL) {
    int locks = jvms->nof_monitors();
    int temps = jvms->stk_size();
    bool is_top_frame = (jvms == this);
    ciMethod* method = jvms->method();

    int frame_size = BytesPerWord * Interpreter::size_activation(method->max_stack(),
                                                                 temps + callee_parameters,
                                                                 extra_args,
                                                                 locks,
                                                                 callee_parameters,
                                                                 callee_locals,
                                                                 is_top_frame);
    size += frame_size;

    callee_parameters = method->size_of_parameters();
    callee_locals = method->max_locals();
    extra_args = 0;
    jvms = jvms->caller();
  }
  return size + Deoptimization::last_frame_adjust(0, callee_locals) * BytesPerWord;
}
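
// A sketch of the walk above (illustrative, assuming an inlined chain
// a() -> b() where 'this' is b's JVMState):
//
//   iteration 1: sizes b's activation; is_top_frame is true and
//                extra_args pads b's expression stack up to max_stack().
//   iteration 2: sizes a's activation; b's size_of_parameters() and
//                max_locals() feed in as callee_parameters/callee_locals.
//
// The final last_frame_adjust() term accounts for the locals of the
// youngest callee, mirroring the frame layout the deopt code will build.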

//=============================================================================
uint CallNode::cmp( const Node &n ) const
{ return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
#ifndef PRODUCT
void CallNode::dump_req(outputStream *st) const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("(");
    if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    else st->print("_ ");
  }
  st->print(")");
}

void CallNode::dump_spec(outputStream *st) const {
  st->print(" ");
  if (tf() != NULL)  tf()->dump_on(st);
  if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
  if (jvms() != NULL)  jvms()->dump_spec(st);
}
#endif

const Type *CallNode::bottom_type() const { return tf()->range_cc(); }
const Type* CallNode::Value(PhaseGVN* phase) const {
  if (!in(0) || phase->type(in(0)) == Type::TOP) {
    return Type::TOP;
  }
  return tf()->range_cc();
}

//------------------------------calling_convention-----------------------------
void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  if (_entry_point == StubRoutines::store_value_type_fields_to_buf()) {
    // The call to that stub is a special case: its inputs are
    // multiple values returned from a call and so it should follow
    // the return convention.
    SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
    return;
  }
  // Use the standard compiler calling convention
  Matcher::calling_convention( sig_bt, parm_regs, argcnt, true );
}


//------------------------------match------------------------------------------
// Construct projections for control, I/O, memory-fields, ..., and
// return result(s) along with their RegMask info
Node *CallNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
  uint con = proj->_con;
  const TypeTuple *range_cc = tf()->range_cc();
  if (con >= TypeFunc::Parms) {
    if (is_CallRuntime()) {
      if (con == TypeFunc::Parms) {
        uint ideal_reg = range_cc->field_at(TypeFunc::Parms)->ideal_reg();
        OptoRegPair regs = match->c_return_value(ideal_reg,true);
        RegMask rm = RegMask(regs.first());
        if (OptoReg::is_valid(regs.second())) {
          rm.Insert(regs.second());
        }
        return new MachProjNode(this,con,rm,ideal_reg);
      } else {
        assert(con == TypeFunc::Parms+1, "only one return value");
        assert(range_cc->field_at(TypeFunc::Parms+1) == Type::HALF, "");
        return new MachProjNode(this,con, RegMask::Empty, (uint)OptoReg::Bad);
      }
    } else {
      // The Call may return multiple values (value type fields): we
      // create one projection per returned value.
      assert(con <= TypeFunc::Parms+1 || ValueTypeReturnedAsFields, "only for multi value return");
      uint ideal_reg = range_cc->field_at(con)->ideal_reg();
      return new MachProjNode(this, con, mask[con-TypeFunc::Parms], ideal_reg);
    }
  }

  switch (con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);

  case TypeFunc::ReturnAdr:
  case TypeFunc::FramePtr:
  default:
    ShouldNotReachHere();
  }
  return NULL;
}

// Do we Match on this edge index or not?  Match no edges
uint CallNode::match_edge(uint idx) const {
  return 0;
}

//
// Determine whether the call could modify the field of the specified
// instance at the specified offset.
//
bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
  assert((t_oop != NULL), "sanity");
  if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
    const TypeTuple* args = _tf->domain_sig();
    Node* dest = NULL;
    // Stubs that can be called once an ArrayCopyNode is expanded have
    // different signatures. Look for the second pointer argument,
    // that is the destination of the copy.
    for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
      if (args->field_at(i)->isa_ptr()) {
        j++;
        if (j == 2) {
          dest = in(i);
          break;
        }
      }
    }
    if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
      return true;
    }
    return false;
  }
  if (t_oop->is_known_instance()) {
    // The instance_id is set only for scalar-replaceable allocations which
    // are not passed as arguments according to Escape Analysis.
    return false;
  }
  if (t_oop->is_ptr_to_boxed_value()) {
    ciKlass* boxing_klass = t_oop->klass();
    if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) {
      // Skip unrelated boxing methods.
      Node* proj = proj_out_or_null(TypeFunc::Parms);
      if ((proj == NULL) || (phase->type(proj)->is_instptr()->klass() != boxing_klass)) {
        return false;
      }
    }
    if (is_CallJava() && as_CallJava()->method() != NULL) {
      ciMethod* meth = as_CallJava()->method();
      if (meth->is_getter()) {
        return false;
      }
      // May modify (by reflection) if a boxing object is passed
      // as argument or returned.
      Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : NULL;
      if (proj != NULL) {
        const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
        if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
                                 (inst_t->klass() == boxing_klass))) {
          return true;
        }
      }
      const TypeTuple* d = tf()->domain_cc();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
        if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
                                 (inst_t->klass() == boxing_klass))) {
          return true;
        }
      }
      return false;
    }
  }
  return true;
}

// Does this call have a direct reference to n other than debug information?
bool CallNode::has_non_debug_use(Node *n) {
  const TypeTuple * d = tf()->domain_cc();
  for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
    Node *arg = in(i);
    if (arg == n) {
      return true;
    }
  }
  return false;
}

bool CallNode::has_debug_use(Node *n) {
  for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
    Node *arg = in(i);
    if (arg == n) {
      return true;
    }
  }
  return false;
}

// Returns the unique CheckCastPP of a call,
// or 'this' if there are several CheckCastPPs or unexpected uses,
// or NULL if there is none.
Node *CallNode::result_cast() {
  Node *cast = NULL;

  Node *p = proj_out_or_null(TypeFunc::Parms);
  if (p == NULL)
    return NULL;

  for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
    Node *use = p->fast_out(i);
    if (use->is_CheckCastPP()) {
      if (cast != NULL) {
        return this;  // more than 1 CheckCastPP
      }
      cast = use;
    } else if (!use->is_Initialize() &&
               !use->is_AddP() &&
               use->Opcode() != Op_MemBarStoreStore) {
      // Expected uses are restricted to a CheckCastPP, an Initialize
      // node, a MemBarStoreStore (clone) and AddP nodes. If we
      // encounter any other use (a Phi node can be seen in rare
      // cases) return this to prevent incorrect optimizations.
      return this;
    }
  }
  return cast;
}
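
// The projection shape walked by extract_projections() below, for a call
// that can throw (illustrative):
//
//   Call
//    |-- Proj(Control) -- Catch --- CatchProj(fall_through_index)
//    |                          \-- CatchProj(catch_all_index)
//    |-- Proj(I_O)      (fallthrough and catch-all/_is_io_use flavors)
//    |-- Proj(Memory)   (fallthrough and catch-all/_is_io_use flavors)
//    \-- Proj(Parms..)  (result projections, possibly absent)
//
// The exception oop, when present, is a CreateEx node hanging off the
// catch-all I_O projection with the CatchProj as its control input.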

CallProjections* CallNode::extract_projections(bool separate_io_proj, bool do_asserts) {
  uint max_res = TypeFunc::Parms-1;
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    ProjNode *pn = fast_out(i)->as_Proj();
    max_res = MAX2(max_res, pn->_con);
  }

  assert(max_res < _tf->range_cc()->cnt(), "result out of bounds");

  uint projs_size = sizeof(CallProjections);
  if (max_res > TypeFunc::Parms) {
    projs_size += (max_res-TypeFunc::Parms)*sizeof(Node*);
  }
  char* projs_storage = resource_allocate_bytes(projs_size);
  CallProjections* projs = new(projs_storage)CallProjections(max_res - TypeFunc::Parms + 1);

  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    ProjNode *pn = fast_out(i)->as_Proj();
    if (pn->outcnt() == 0) continue;
    switch (pn->_con) {
    case TypeFunc::Control:
      {
        // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
        projs->fallthrough_proj = pn;
        DUIterator_Fast jmax, j = pn->fast_outs(jmax);
        const Node *cn = pn->fast_out(j);
        if (cn->is_Catch()) {
          ProjNode *cpn = NULL;
          for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
            cpn = cn->fast_out(k)->as_Proj();
            assert(cpn->is_CatchProj(), "must be a CatchProjNode");
            if (cpn->_con == CatchProjNode::fall_through_index)
              projs->fallthrough_catchproj = cpn;
            else {
              assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
              projs->catchall_catchproj = cpn;
            }
          }
        }
        break;
      }
    case TypeFunc::I_O:
      if (pn->_is_io_use)
        projs->catchall_ioproj = pn;
      else
        projs->fallthrough_ioproj = pn;
      for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
        Node* e = pn->out(j);
        if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
          assert(projs->exobj == NULL, "only one");
          projs->exobj = e;
        }
      }
      break;
    case TypeFunc::Memory:
      if (pn->_is_io_use)
        projs->catchall_memproj = pn;
      else
        projs->fallthrough_memproj = pn;
      break;
    case TypeFunc::Parms:
      projs->resproj[0] = pn;
      break;
    default:
      assert(pn->_con <= max_res, "unexpected projection from allocation node.");
      projs->resproj[pn->_con-TypeFunc::Parms] = pn;
      break;
    }
  }

  // The resproj may not exist because the result could be ignored
  // and the exception object may not exist if an exception handler
  // swallows the exception but all the others must exist and be found.
  assert(projs->fallthrough_proj != NULL, "must be found");
  do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
  assert(!do_asserts || projs->fallthrough_catchproj != NULL, "must be found");
  assert(!do_asserts || projs->fallthrough_memproj   != NULL, "must be found");
  assert(!do_asserts || projs->fallthrough_ioproj    != NULL, "must be found");
  assert(!do_asserts || projs->catchall_catchproj    != NULL, "must be found");
  if (separate_io_proj) {
    assert(!do_asserts || projs->catchall_memproj    != NULL, "must be found");
    assert(!do_asserts || projs->catchall_ioproj     != NULL, "must be found");
  }
  return projs;
}

Node *CallNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  CallGenerator* cg = generator();
  if (can_reshape && cg != NULL && cg->is_mh_late_inline() && !cg->already_attempted()) {
    // Check whether this MH call becomes a candidate for inlining
    ciMethod* callee = cg->method();
    vmIntrinsics::ID iid = callee->intrinsic_id();
    if (iid == vmIntrinsics::_invokeBasic) {
      if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(NULL);
      }
    } else {
      assert(callee->has_member_arg(), "wrong type of call?");
      if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(NULL);
      }
    }
  }
  return SafePointNode::Ideal(phase, can_reshape);
}

bool CallNode::is_call_to_arraycopystub() const {
  if (_name != NULL && strstr(_name, "arraycopy") != 0) {
    return true;
  }
  return false;
}

//=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
uint CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  return CallNode::cmp(call) && _method == call._method &&
         _override_symbolic_info == call._override_symbolic_info;
}
#ifndef PRODUCT
void CallJavaNode::dump_spec(outputStream *st) const {
  if( _method ) _method->print_short_name(st);
  CallNode::dump_spec(st);
}

void CallJavaNode::dump_compact_spec(outputStream* st) const {
  if (_method) {
    _method->print_short_name(st);
  } else {
    st->print("<?>");
  }
}
#endif

//=============================================================================
uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
uint CallStaticJavaNode::cmp( const Node &n ) const {
  CallStaticJavaNode &call = (CallStaticJavaNode&)n;
  return CallJavaNode::cmp(call);
}

//----------------------------uncommon_trap_request----------------------------
// If this is an uncommon trap, return the request code, else zero.
int CallStaticJavaNode::uncommon_trap_request() const {
  if (_name != NULL && !strcmp(_name, "uncommon_trap")) {
    return extract_uncommon_trap_request(this);
  }
  return 0;
}
int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
#ifndef PRODUCT
  if (!(call->req() > TypeFunc::Parms &&
        call->in(TypeFunc::Parms) != NULL &&
        call->in(TypeFunc::Parms)->is_Con() &&
        call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
    assert(in_dump() != 0, "OK if dumping");
    tty->print("[bad uncommon trap]");
    return 0;
  }
#endif
  return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
}
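
// Illustrative note: an uncommon trap is a CallStaticJavaNode named
// "uncommon_trap" whose first real argument (at TypeFunc::Parms) is a
// constant int carrying the Deoptimization request.  That is the word
// extract_uncommon_trap_request() reads back out above, and it is what
// Deoptimization::format_trap_request() decodes when dumping below.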

#ifndef PRODUCT
void CallStaticJavaNode::dump_spec(outputStream *st) const {
  st->print("# Static ");
  if (_name != NULL) {
    st->print("%s", _name);
    int trap_req = uncommon_trap_request();
    if (trap_req != 0) {
      char buf[100];
      st->print("(%s)",
                Deoptimization::format_trap_request(buf, sizeof(buf),
                                                    trap_req));
    }
    st->print(" ");
  }
  CallJavaNode::dump_spec(st);
}

void CallStaticJavaNode::dump_compact_spec(outputStream* st) const {
  if (_method) {
    _method->print_short_name(st);
  } else if (_name) {
    st->print("%s", _name);
  } else {
    st->print("<?>");
  }
}
#endif

//=============================================================================
uint CallDynamicJavaNode::size_of() const { return sizeof(*this); }
uint CallDynamicJavaNode::cmp( const Node &n ) const {
  CallDynamicJavaNode &call = (CallDynamicJavaNode&)n;
  return CallJavaNode::cmp(call);
}
#ifndef PRODUCT
void CallDynamicJavaNode::dump_spec(outputStream *st) const {
  st->print("# Dynamic ");
  CallJavaNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallRuntimeNode::size_of() const { return sizeof(*this); }
uint CallRuntimeNode::cmp( const Node &n ) const {
  CallRuntimeNode &call = (CallRuntimeNode&)n;
  return CallNode::cmp(call) && !strcmp(_name,call._name);
}
#ifndef PRODUCT
void CallRuntimeNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif

//------------------------------calling_convention-----------------------------
void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  if (_entry_point == NULL) {
    // The call to that stub is a special case: its inputs are
    // multiple values returned from a call and so it should follow
    // the return convention.
    SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
    return;
  }
  Matcher::c_calling_convention( sig_bt, parm_regs, argcnt );
}

//=============================================================================
//------------------------------calling_convention-----------------------------


//=============================================================================
#ifndef PRODUCT
void CallLeafNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif

uint CallLeafNoFPNode::match_edge(uint idx) const {
  // Null entry point is a special case for which the target is in a
  // register. Need to match that edge.
  return entry_point() == NULL && idx == TypeFunc::Parms;
}

//=============================================================================

void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
  assert(verify_jvms(jvms), "jvms must match");
  int loc = jvms->locoff() + idx;
  if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
    // If current local idx is top then local idx - 1 could
    // be a long/double that needs to be killed since top could
    // represent the 2nd half of the long/double.
    uint ideal = in(loc -1)->ideal_reg();
    if (ideal == Op_RegD || ideal == Op_RegL) {
      // set other (low index) half to top
      set_req(loc - 1, in(loc));
    }
  }
  set_req(loc, c);
}

uint SafePointNode::size_of() const { return sizeof(*this); }
uint SafePointNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

//-------------------------set_next_exception----------------------------------
void SafePointNode::set_next_exception(SafePointNode* n) {
  assert(n == NULL || n->Opcode() == Op_SafePoint, "correct value for next_exception");
  if (len() == req()) {
    if (n != NULL)  add_prec(n);
  } else {
    set_prec(req(), n);
  }
}


//----------------------------next_exception-----------------------------------
SafePointNode* SafePointNode::next_exception() const {
  if (len() == req()) {
    return NULL;
  } else {
    Node* n = in(req());
    assert(n == NULL || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
    return (SafePointNode*) n;
  }
}
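
// A sketch of the convention implemented above: pending exception states
// are chained through the first precedence edge (slot req()), so a map
// with two pending exceptions looks like
//
//   map --prec--> ex1 --prec--> ex2 --> NULL
//
// which is also the shape JVMState::dump_on() walks when printing.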


//------------------------------Ideal------------------------------------------
// Skip over any collapsed Regions
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape)) {
    return this;
  }
  if (jvms() != NULL) {
    bool progress = false;
    // A ValueTypeNode that was already heap allocated in the debug
    // info? Reference the object directly. Helps removal of useless
    // value type allocations with incremental inlining.
    for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
      Node *arg = in(i);
      if (arg->is_ValueType()) {
        ValueTypeNode* vt = arg->as_ValueType();
        Node* in_oop = vt->get_oop();
        const Type* oop_type = phase->type(in_oop);
        if (!TypePtr::NULL_PTR->higher_equal(oop_type)) {
          set_req(i, in_oop);
          progress = true;
        }
      }
    }
    if (progress) {
      return this;
    }
  }
  return NULL;
}

//------------------------------Identity---------------------------------------
// Remove obviously duplicate safepoints
Node* SafePointNode::Identity(PhaseGVN* phase) {

  // If you have back to back safepoints, remove one
  if( in(TypeFunc::Control)->is_SafePoint() )
    return in(TypeFunc::Control);

  if( in(0)->is_Proj() ) {
    Node *n0 = in(0)->in(0);
    // Check if it is a call projection (except Leaf Call)
    if( n0->is_Catch() ) {
      n0 = n0->in(0)->in(0);
      assert( n0->is_Call(), "expect a call here" );
    }
    if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) {
      // Useless Safepoint, so remove it
      return in(TypeFunc::Control);
    }
  }

  return this;
}

//------------------------------Value------------------------------------------
const Type* SafePointNode::Value(PhaseGVN* phase) const {
  if( phase->type(in(0)) == Type::TOP ) return Type::TOP;
  if( phase->eqv( in(0), this ) ) return Type::TOP; // Dead infinite loop
  return Type::CONTROL;
}

#ifndef PRODUCT
void SafePointNode::dump_spec(outputStream *st) const {
  st->print(" SafePoint ");
  _replaced_nodes.dump(st);
}

// The related nodes of a SafepointNode are all data inputs, excluding the
// control boundary, as well as all outputs till level 2 (to include projection
// nodes and targets). In compact mode, just include inputs till level 1 and
// outputs as before.
void SafePointNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const {
  if (compact) {
    this->collect_nodes(in_rel, 1, false, false);
  } else {
    this->collect_nodes_in_all_data(in_rel, false);
  }
  this->collect_nodes(out_rel, -2, false, false);
}
#endif

const RegMask &SafePointNode::in_RegMask(uint idx) const {
  if( idx < TypeFunc::Parms ) return RegMask::Empty;
  // Values outside the domain represent debug info
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}
const RegMask &SafePointNode::out_RegMask() const {
  return RegMask::Empty;
}


void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
  assert((int)grow_by > 0, "sanity");
  int monoff = jvms->monoff();
  int scloff = jvms->scloff();
  int endoff = jvms->endoff();
  assert(endoff == (int)req(), "no other states or debug info after me");
  Node* top = Compile::current()->top();
  for (uint i = 0; i < grow_by; i++) {
    ins_req(monoff, top);
  }
  jvms->set_monoff(monoff + grow_by);
  jvms->set_scloff(scloff + grow_by);
  jvms->set_endoff(endoff + grow_by);
}

void SafePointNode::push_monitor(const FastLockNode *lock) {
  // Add a LockNode, which points to both the original BoxLockNode (the
  // stack space for the monitor) and the Object being locked.
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  assert(req() == jvms()->endoff(), "correct sizing");
  int nextmon = jvms()->scloff();
  if (GenerateSynchronizationCode) {
    ins_req(nextmon,   lock->box_node());
    ins_req(nextmon+1, lock->obj_node());
  } else {
    Node* top = Compile::current()->top();
    ins_req(nextmon, top);
    ins_req(nextmon, top);
  }
  jvms()->set_scloff(nextmon + MonitorEdges);
  jvms()->set_endoff(req());
}

void SafePointNode::pop_monitor() {
  // Delete last monitor from debug info
  debug_only(int num_before_pop = jvms()->nof_monitors());
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  int scloff = jvms()->scloff();
  int endoff = jvms()->endoff();
  int new_scloff = scloff - MonitorEdges;
  int new_endoff = endoff - MonitorEdges;
  jvms()->set_scloff(new_scloff);
  jvms()->set_endoff(new_endoff);
  while (scloff > new_scloff)  del_req_ordered(--scloff);
  assert(jvms()->nof_monitors() == num_before_pop-1, "");
}

Node *SafePointNode::peek_monitor_box() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_box(jvms(), mon);
}

Node *SafePointNode::peek_monitor_obj() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_obj(jvms(), mon);
}

// Do we Match on this edge index or not?  Match no edges
uint SafePointNode::match_edge(uint idx) const {
  if( !needs_polling_address_input() )
    return 0;

  return (TypeFunc::Parms == idx);
}

//==============  SafePointScalarObjectNode  ==============

SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                                                     AllocateNode* alloc,
#endif
                                                     uint first_index,
                                                     uint n_fields) :
  TypeNode(tp, 1), // 1 control input -- seems required.  Get from root.
#ifdef ASSERT
  _alloc(alloc),
#endif
  _first_index(first_index),
  _n_fields(n_fields)
{
  init_class_id(Class_SafePointScalarObject);
}
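
// Illustrative example: when an allocation of a two-field object is
// scalar replaced, the SafePoints that used to reference the oop instead
// reference a SafePointScalarObjectNode with n_fields == 2, and the field
// values live directly in the SafePoint's inputs at
// [first_index, first_index + 2).  That is how JVMState::format() above
// recovers and prints the eliminated object's state.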

// Do not allow value-numbering for SafePointScalarObject node.
uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
uint SafePointScalarObjectNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

uint SafePointScalarObjectNode::ideal_reg() const {
  return 0; // No matching to machine instruction
}

const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}

const RegMask &SafePointScalarObjectNode::out_RegMask() const {
  return RegMask::Empty;
}

uint SafePointScalarObjectNode::match_edge(uint idx) const {
  return 0;
}

SafePointScalarObjectNode*
SafePointScalarObjectNode::clone(Dict* sosn_map) const {
  void* cached = (*sosn_map)[(void*)this];
  if (cached != NULL) {
    return (SafePointScalarObjectNode*)cached;
  }
  SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
  sosn_map->Insert((void*)this, (void*)res);
  return res;
}


#ifndef PRODUCT
void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
  st->print(" # fields@[%d..%d]", first_index(),
            first_index() + n_fields() - 1);
}

#endif

//=============================================================================
uint AllocateNode::size_of() const { return sizeof(*this); }

AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
                           Node *ctrl, Node *mem, Node *abio,
                           Node *size, Node *klass_node,
                           Node* initial_test, ValueTypeBaseNode* value_node)
  : CallNode(atype, NULL, TypeRawPtr::BOTTOM)
{
  init_class_id(Class_Allocate);
  init_flags(Flag_is_macro);
  _is_scalar_replaceable = false;
  _is_non_escaping = false;
  _is_allocation_MemBar_redundant = false;
  Node *topnode = C->top();

  init_req( TypeFunc::Control  , ctrl );
  init_req( TypeFunc::I_O      , abio );
  init_req( TypeFunc::Memory   , mem );
  init_req( TypeFunc::ReturnAdr, topnode );
  init_req( TypeFunc::FramePtr , topnode );
  init_req( AllocSize          , size);
  init_req( KlassNode          , klass_node);
  init_req( InitialTest        , initial_test);
  init_req( ALength            , topnode);
  init_req( ValueNode          , value_node);
  C->add_macro_node(this);
}

void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
{
  assert(initializer != NULL &&
         initializer->is_initializer() &&
         !initializer->is_static(),
         "unexpected initializer method");
  BCEscapeAnalyzer* analyzer = initializer->get_bcea();
  if (analyzer == NULL) {
    return;
  }

  // Allocation node is first parameter in its initializer
  if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
    _is_allocation_MemBar_redundant = true;
  }
}

Node* AllocateNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  // Check for unused value type allocation
  if (can_reshape && in(AllocateNode::ValueNode) != NULL &&
      outcnt() != 0 && result_cast() == NULL) {
    // Remove allocation by replacing the projection nodes with its inputs
    PhaseIterGVN* igvn = phase->is_IterGVN();
    CallProjections* projs = extract_projections(true, false);
    assert(projs->nb_resproj <= 1, "unexpected number of results");
    if (projs->fallthrough_catchproj != NULL) {
      igvn->replace_node(projs->fallthrough_catchproj, in(TypeFunc::Control));
    }
    if (projs->fallthrough_memproj != NULL) {
      igvn->replace_node(projs->fallthrough_memproj, in(TypeFunc::Memory));
    }
    if (projs->catchall_memproj != NULL) {
      igvn->replace_node(projs->catchall_memproj, phase->C->top());
    }
    if (projs->fallthrough_ioproj != NULL) {
      igvn->replace_node(projs->fallthrough_ioproj, in(TypeFunc::I_O));
    }
    if (projs->catchall_ioproj != NULL) {
      igvn->replace_node(projs->catchall_ioproj, phase->C->top());
    }
    if (projs->catchall_catchproj != NULL) {
      igvn->replace_node(projs->catchall_catchproj, phase->C->top());
    }
    if (projs->resproj[0] != NULL) {
      igvn->replace_node(projs->resproj[0], phase->C->top());
    }
    igvn->remove_dead_node(this);
    return NULL;
  }

  return CallNode::Ideal(phase, can_reshape);
}

//=============================================================================
Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* res = SafePointNode::Ideal(phase, can_reshape);
  if (res != NULL) {
    return res;
  }
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return NULL;

  const Type* type = phase->type(Ideal_length());
  if (type->isa_int() && type->is_int()->_hi < 0) {
    if (can_reshape) {
      PhaseIterGVN *igvn = phase->is_IterGVN();
      // Unreachable fall through path (negative array length),
      // the allocation can only throw so disconnect it.
      Node* proj = proj_out_or_null(TypeFunc::Control);
      Node* catchproj = NULL;
      if (proj != NULL) {
        for (DUIterator_Fast imax, i = proj->fast_outs(imax); i < imax; i++) {
          Node *cn = proj->fast_out(i);
          if (cn->is_Catch()) {
            catchproj = cn->as_Multi()->proj_out_or_null(CatchProjNode::fall_through_index);
            break;
          }
        }
      }
      if (catchproj != NULL && catchproj->outcnt() > 0 &&
          (catchproj->outcnt() > 1 ||
           catchproj->unique_out()->Opcode() != Op_Halt)) {
        assert(catchproj->is_CatchProj(), "must be a CatchProjNode");
        Node* nproj = catchproj->clone();
        igvn->register_new_node_with_optimizer(nproj);

        Node *frame = new ParmNode( phase->C->start(), TypeFunc::FramePtr );
        frame = phase->transform(frame);
        // Halt & Catch Fire
        Node *halt = new HaltNode( nproj, frame );
        phase->C->root()->add_req(halt);
        phase->transform(halt);

        igvn->replace_node(catchproj, phase->C->top());
        return this;
      }
    } else {
      // Can't correct it during regular GVN so register for IGVN
      phase->C->record_for_igvn(this);
    }
  }
  return NULL;
}

// Retrieve the length from the AllocateArrayNode. Narrow the type with a
// CastII, if appropriate. If we are not allowed to create new nodes, and
// a CastII is appropriate, return NULL.
Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTransform *phase, bool allow_new_nodes) {
  Node *length = in(AllocateNode::ALength);
  assert(length != NULL, "length is not null");

  const TypeInt* length_type = phase->find_int_type(length);
  const TypeAryPtr* ary_type = oop_type->isa_aryptr();

  if (ary_type != NULL && length_type != NULL) {
    const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
    if (narrow_length_type != length_type) {
      // Assert one of:
      //   - the narrow_length is 0
      //   - the narrow_length is not wider than length
      assert(narrow_length_type == TypeInt::ZERO ||
             length_type->is_con() && narrow_length_type->is_con() &&
                (narrow_length_type->_hi <= length_type->_lo) ||
             (narrow_length_type->_hi <= length_type->_hi &&
              narrow_length_type->_lo >= length_type->_lo),
             "narrow type must be narrower than length type");

      // Return NULL if new nodes are not allowed
      if (!allow_new_nodes)  return NULL;
      // Create a cast which is control dependent on the initialization to
      // propagate the fact that the array length must be positive.
      InitializeNode* init = initialization();
      assert(init != NULL, "initialization not found");
      length = new CastIINode(length, narrow_length_type);
      length->set_req(0, init->proj_out_or_null(0));
    }
  }

  return length;
}
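
// Illustrative use of the narrowing above: for an int[] allocation whose
// length input is an arbitrary int parameter, once control reaches the
// allocation's InitializeNode without throwing, the length is known to
// have been a valid (non-negative) array size.  The CastII pins that
// narrowed type, control dependent on the initialization, so later users
// of the length can rely on the tighter range.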
//
//   s();
//   if (p)
//     s();
//   f();
//   s();
//   s();
//
// The code will work properly for this case, leaving in the unlock
// before the call to f and the relock after it.
//
// A potentially interesting case which isn't handled here is when the
// locking is partially redundant.
//
//   s();
//   if (p)
//     s();
//
// This could be eliminated by putting unlocking on the else case and
// eliminating the first unlock and the lock in the then side.
// Alternatively, the unlock could be moved out of the then side so it
// occurred after the merge, and then the first unlock and the second
// lock would be eliminated.  This might require less manipulation of
// the memory state to get correct.
//
// Additionally, we might allow work between an unlock and a lock before
// giving up on eliminating the locks.  The current code disallows any
// conditional control flow between these operations.  A formulation
// similar to partial redundancy elimination, computing the
// availability of unlocking and the anticipability of locking at a
// program point, would allow detection of fully redundant locking with
// some amount of work in between.  I'm not sure how often I really
// think that would occur though.  Most of the cases I've seen
// indicate it's likely non-trivial work would occur in between.
// There may be other more complicated constructs where we could
// eliminate locking but I haven't seen any others appear as hot or
// interesting.
//
// Locking and unlocking have a canonical form in ideal that looks
// roughly like this:
//
//   <obj>
//   | \\------+
//   |  \       \
//   |  BoxLock  \
//   |  |   |     \
//   |  |    \     \
//   |  |    FastLock
//   |  |   /
//   |  |  /
//   |  |  |
//
//   Lock
//   |
//   Proj #0
//   |
//   MembarAcquire
//   |
//   Proj #0
//
//   MembarRelease
//   |
//   Proj #0
//   |
//   Unlock
//   |
//   Proj #0
//
//
// This code proceeds by processing Lock nodes during PhaseIterGVN
// and searching back through their control for the proper code
// patterns.  Once it finds a set of lock and unlock operations to
// eliminate, they are marked as eliminatable, which causes the
// expansion of the Lock and Unlock macro nodes to make the operations NOPs.
//
//=============================================================================

//
// Utility function to skip over uninteresting control nodes.  Nodes skipped are:
//  - copy regions.  (These may not have been optimized away yet.)
//  - eliminated locking nodes
//
static Node* next_control(Node* ctrl) {
  if (ctrl == NULL)
    return NULL;
  while (1) {
    if (ctrl->is_Region()) {
      RegionNode* r = ctrl->as_Region();
      Node* n = r->is_copy();
      if (n == NULL)
        break;  // hit a non-copy region, return it
      else
        ctrl = n;
    } else if (ctrl->is_Proj()) {
      Node* in0 = ctrl->in(0);
      if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) {
        ctrl = in0->in(0);
      } else {
        break;
      }
    } else {
      break;  // found an interesting control
    }
  }
  return ctrl;
}
//
// Given a control, see if it's the control projection of an Unlock which
// is operating on the same object as lock.
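// (That is, ctrl should be the Proj #0 of an Unlock in the canonical form
//  shown above; the Unlock must use the same object and the same BoxLock
//  stack slot as lock, and must not already be eliminated.)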
//
bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
                                            GrowableArray<AbstractLockNode*> &lock_ops) {
  ProjNode* ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : NULL;
  if (ctrl_proj != NULL && ctrl_proj->_con == TypeFunc::Control) {
    Node* n = ctrl_proj->in(0);
    if (n != NULL && n->is_Unlock()) {
      UnlockNode* unlock = n->as_Unlock();
      if (lock->obj_node()->eqv_uncast(unlock->obj_node()) &&
          BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) &&
          !unlock->is_eliminated()) {
        lock_ops.append(unlock);
        return true;
      }
    }
  }
  return false;
}

//
// Find the lock matching an unlock.  Returns NULL if a safepoint
// or complicated control is encountered first.
LockNode* AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
  LockNode* lock_result = NULL;
  // find the matching lock, or an intervening safepoint
  Node* ctrl = next_control(unlock->in(0));
  while (1) {
    assert(ctrl != NULL, "invalid control graph");
    assert(!ctrl->is_Start(), "missing lock for unlock");
    if (ctrl->is_top())  break;  // dead control path
    if (ctrl->is_Proj())  ctrl = ctrl->in(0);
    if (ctrl->is_SafePoint()) {
      break;  // found a safepoint (may be the lock we are searching for)
    } else if (ctrl->is_Region()) {
      // Check for a simple diamond pattern.  Punt on anything more complicated.
      if (ctrl->req() == 3 && ctrl->in(1) != NULL && ctrl->in(2) != NULL) {
        Node* in1 = next_control(ctrl->in(1));
        Node* in2 = next_control(ctrl->in(2));
        if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
             (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
          ctrl = next_control(in1->in(0)->in(0));
        } else {
          break;
        }
      } else {
        break;
      }
    } else {
      ctrl = next_control(ctrl->in(0));  // keep searching
    }
  }
  if (ctrl->is_Lock()) {
    LockNode* lock = ctrl->as_Lock();
    if (lock->obj_node()->eqv_uncast(unlock->obj_node()) &&
        BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) {
      lock_result = lock;
    }
  }
  return lock_result;
}

// This code corresponds to case 3 above.
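//
// A rough sketch of the shape being matched, in the spirit of the diagrams
// above ('node' below is one of the two If projections):
//
//      ... Unlock ...         <- found above the If by find_matching_unlock()
//           |
//           If
//          /  \
//    IfTrue    IfFalse
//      |          |
//    node        Lock         <- the matching lock on the opposite projection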

bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                                       GrowableArray<AbstractLockNode*> &lock_ops) {
  Node* if_node = node->in(0);
  bool  if_true = node->is_IfTrue();

  if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
    Node* lock_ctrl = next_control(if_node->in(0));
    if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
      Node* lock1_node = NULL;
      ProjNode* proj = if_node->as_If()->proj_out(!if_true);
      if (if_true) {
        if (proj->is_IfFalse() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      } else {
        if (proj->is_IfTrue() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      }
      if (lock1_node != NULL && lock1_node->is_Lock()) {
        LockNode* lock1 = lock1_node->as_Lock();
        if (lock->obj_node()->eqv_uncast(lock1->obj_node()) &&
            BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) &&
            !lock1->is_eliminated()) {
          lock_ops.append(lock1);
          return true;
        }
      }
    }
  }

  lock_ops.trunc_to(0);
  return false;
}

bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                                               GrowableArray<AbstractLockNode*> &lock_ops) {
  // Check each control merging at this point for a matching unlock.
  // in(0) should be the self edge, so skip it.
  for (int i = 1; i < (int)region->req(); i++) {
    Node* in_node = next_control(region->in(i));
    if (in_node != NULL) {
      if (find_matching_unlock(in_node, lock, lock_ops)) {
        // found a match so keep on checking.
        continue;
      } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) {
        continue;
      }

      // If we fall through to here then it was some kind of node we
      // don't understand, or there wasn't a matching unlock, so give
      // up trying to merge locks.
      lock_ops.trunc_to(0);
      return false;
    }
  }
  return true;
}

#ifndef PRODUCT
//
// Create a counter which counts the number of times this lock is acquired
//
void AbstractLockNode::create_lock_counter(JVMState* state) {
  _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter);
}

void AbstractLockNode::set_eliminated_lock_counter() {
  if (_counter) {
    // Update the counter to indicate that this lock was eliminated.
    // The counter update code will stay around even though the
    // optimizer will eliminate the lock operation itself.
    _counter->set_tag(NamedCounter::EliminatedLockCounter);
  }
}

const char* AbstractLockNode::_kind_names[] = {"Regular", "NonEscObj", "Coarsened", "Nested"};

void AbstractLockNode::dump_spec(outputStream* st) const {
  st->print("%s ", _kind_names[_kind]);
  CallNode::dump_spec(st);
}

void AbstractLockNode::dump_compact_spec(outputStream* st) const {
  st->print("%s", _kind_names[_kind]);
}

// The related set of lock nodes includes the control boundary.
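// (Roughly: in compact mode only the immediate inputs are collected,
// otherwise all data inputs are; outputs are collected two levels deep so
// that the control projections and their users are included.)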
void AbstractLockNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const {
  if (compact) {
    this->collect_nodes(in_rel, 1, false, false);
  } else {
    this->collect_nodes_in_all_data(in_rel, true);
  }
  this->collect_nodes(out_rel, -2, false, false);
}
#endif

//=============================================================================
Node* LockNode::Ideal(PhaseGVN* phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or NULL)
  Node* result = SafePointNode::Ideal(phase, can_reshape);
  if (result != NULL)  return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return NULL;

  // Now see if we can optimize away this lock.  We don't actually
  // remove the locking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the lock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
    //
    // If we are locking an unescaped object, the lock/unlock is unnecessary.
    //
    ConnectionGraph* cgr = phase->C->congraph();
    if (cgr != NULL && cgr->not_global_escape(obj_node())) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could have been marked eliminated by lock coarsening
      // code during the first IGVN before EA.  Replace the coarsened flag
      // to eliminate all associated locks/unlocks.
#ifdef ASSERT
      this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc1");
#endif
      this->set_non_esc_obj();
      return result;
    }

    //
    // Try lock coarsening
    //
    PhaseIterGVN* iter = phase->is_IterGVN();
    if (iter != NULL && !is_eliminated()) {

      GrowableArray<AbstractLockNode*> lock_ops;

      Node* ctrl = next_control(in(0));

      // now search back for a matching Unlock
      if (find_matching_unlock(ctrl, this, lock_ops)) {
        // found an unlock directly preceding this lock.  This is the
        // case of a single unlock directly control dependent on a
        // single lock, which is the trivial version of case 1 or 2.
      } else if (ctrl->is_Region()) {
        if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) {
          // found a lock preceded by multiple unlocks along all paths
          // joining at this point, which is case 3 in the description above.
        }
      } else {
        // see if this lock comes from either half of an if whose
        // predecessor merges unlocks while the other half of the if
        // performs a lock.
        if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) {
          // found an unlock splitting to an if with locks on both branches.
        }
      }

      if (lock_ops.length() > 0) {
        // add ourselves to the list of locks to be eliminated.
        lock_ops.append(this);

#ifndef PRODUCT
        if (PrintEliminateLocks) {
          int locks = 0;
          int unlocks = 0;
          for (int i = 0; i < lock_ops.length(); i++) {
            AbstractLockNode* lock = lock_ops.at(i);
            if (lock->Opcode() == Op_Lock)
              locks++;
            else
              unlocks++;
            if (Verbose) {
              lock->dump(1);
            }
          }
          tty->print_cr("***Eliminated %d unlocks and %d locks", unlocks, locks);
        }
#endif

        // for each of the identified locks, mark them
        // as eliminatable
        for (int i = 0; i < lock_ops.length(); i++) {
          AbstractLockNode* lock = lock_ops.at(i);

          // Mark it eliminated by coarsening and update any counters
#ifdef ASSERT
          lock->log_lock_optimization(phase->C, "eliminate_lock_set_coarsened");
#endif
          lock->set_coarsened();
        }
      } else if (ctrl->is_Region() &&
                 iter->_worklist.member(ctrl)) {
        // We weren't able to find any opportunities, but the region this
        // lock is control dependent on hasn't been processed yet, so put
        // this lock back on the worklist so we can check again once any
        // region simplification has occurred.
        iter->_worklist.push(this);
      }
    }
  }

  return result;
}

//=============================================================================
bool LockNode::is_nested_lock_region() {
  return is_nested_lock_region(NULL);
}

// 'c' is used for access to the compilation log; no logging if 'c' is NULL
bool LockNode::is_nested_lock_region(Compile* c) {
  BoxLockNode* box = box_node()->as_BoxLock();
  int stk_slot = box->stack_slot();
  if (stk_slot <= 0) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_1");
#endif
    return false;  // External lock, or this is not a Box (e.g. a Phi node).
  }

  // Ignore complex cases: merged locks or multiple locks.
  Node* obj = obj_node();
  LockNode* unique_lock = NULL;
  if (!box->is_simple_lock_region(&unique_lock, obj)) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_2a");
#endif
    return false;
  }
  if (unique_lock != this) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_2b");
#endif
    return false;
  }

  // Look for an external lock for the same object.
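  // That is, walk the JVM states from the youngest frame outward and scan
  // their monitors: a monitor on the same object whose BoxLock occupies a
  // smaller stack slot was acquired earlier and still encloses this lock,
  // which makes this a nested lock region.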
  SafePointNode* sfn = this->as_SafePoint();
  JVMState* youngest_jvms = sfn->jvms();
  int max_depth = youngest_jvms->depth();
  for (int depth = 1; depth <= max_depth; depth++) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    int num_mon = jvms->nof_monitors();
    // Loop over monitors
    for (int idx = 0; idx < num_mon; idx++) {
      Node* obj_node = sfn->monitor_obj(jvms, idx);
      BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock();
      if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
        return true;
      }
    }
  }
#ifdef ASSERT
  this->log_lock_optimization(c, "eliminate_lock_INLR_3");
#endif
  return false;
}

//=============================================================================
uint UnlockNode::size_of() const { return sizeof(*this); }

//=============================================================================
Node* UnlockNode::Ideal(PhaseGVN* phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or NULL)
  Node* result = SafePointNode::Ideal(phase, can_reshape);
  if (result != NULL)  return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return NULL;

  // Now see if we can optimize away this unlock.  We don't actually
  // remove the unlocking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the unlock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  // Escape state is defined after the Parse phase.
  if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
    //
    // If we are unlocking an unescaped object, the lock/unlock is unnecessary.
    //
    ConnectionGraph* cgr = phase->C->congraph();
    if (cgr != NULL && cgr->not_global_escape(obj_node())) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could have been marked eliminated by lock coarsening
      // code during the first IGVN before EA.  Replace the coarsened flag
      // to eliminate all associated locks/unlocks.
#ifdef ASSERT
      this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
#endif
      this->set_non_esc_obj();
    }
  }
  return result;
}

const char* AbstractLockNode::kind_as_string() const {
  return is_coarsened()   ? "coarsened" :
         is_nested()      ? "nested" :
         is_non_esc_obj() ? "non_escaping" :
                            "?";
}

void AbstractLockNode::log_lock_optimization(Compile* C, const char* tag) const {
  if (C == NULL) {
    return;
  }
  CompileLog* log = C->log();
  if (log != NULL) {
    log->begin_head("%s lock='%d' compile_id='%d' class_id='%s' kind='%s'",
                    tag, is_Lock(), C->compile_id(),
                    is_Unlock() ? "unlock" : is_Lock() ? "lock" : "?",
                    kind_as_string());
    log->stamp();
    log->end_head();
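    // (Unlock nodes keep the JVM state used for logging in dbg_jvms(),
    //  while Lock nodes report their regular jvms().)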
    JVMState* p = is_Unlock() ? (as_Unlock()->dbg_jvms()) : jvms();
    while (p != NULL) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail(tag);
  }
}

bool CallNode::may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseTransform* phase) {
  if (dest_t->is_known_instance() && t_oop->is_known_instance()) {
    return dest_t->instance_id() == t_oop->instance_id();
  }

  if (dest_t->isa_instptr() && !dest_t->klass()->equals(phase->C->env()->Object_klass())) {
    // clone
    if (t_oop->isa_aryptr()) {
      return false;
    }
    if (!t_oop->isa_instptr()) {
      return true;
    }
    if (dest_t->klass()->is_subtype_of(t_oop->klass()) || t_oop->klass()->is_subtype_of(dest_t->klass())) {
      return true;
    }
    // unrelated
    return false;
  }

  if (dest_t->isa_aryptr()) {
    // arraycopy or array clone
    if (t_oop->isa_instptr()) {
      return false;
    }
    if (!t_oop->isa_aryptr()) {
      return true;
    }

    const Type* elem = dest_t->is_aryptr()->elem();
    if (elem == Type::BOTTOM) {
      // An array, but we don't know what the elements are
      return true;
    }

    dest_t = dest_t->is_aryptr()->with_field_offset(Type::OffsetBot)->add_offset(Type::OffsetBot)->is_oopptr();
    t_oop = t_oop->is_aryptr()->with_field_offset(Type::OffsetBot);
    uint dest_alias = phase->C->get_alias_index(dest_t);
    uint t_oop_alias = phase->C->get_alias_index(t_oop);

    return dest_alias == t_oop_alias;
  }

  return true;
}
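
// For reference, a rough summary of may_modify_arraycopy_helper() above
// (dest_t is the destination of the arraycopy/clone; t_oop is the type being
// tested for interference):
//
//   dest_t \ t_oop | instance              | array
//   ---------------+-----------------------+-----------------------------
//   instance klass | only if the klasses   | never (cloning an instance
//   (not Object)   | are subtype related   | cannot touch an array)
//   array          | never                 | only if the element alias
//                  |                       | indices match, or the
//                  |                       | elements are unknown
//
// Known-instance types short-circuit all of this: they interfere only if
// their instance ids are equal.  Anything not covered above is
// conservatively treated as interfering.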