/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "libadt/vectset.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/locknode.hpp"
#include "opto/loopnode.hpp"
#include "opto/macro.hpp"
#include "opto/memnode.hpp"
#include "opto/node.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
#include "runtime/sharedRuntime.hpp"


//
// Replace any references to "oldref" in inputs to "use" with "newref".
// Returns the number of replacements made.
//
int PhaseMacroExpand::replace_input(Node *use, Node *oldref, Node *newref) {
  int nreplacements = 0;
  uint req = use->req();
  for (uint j = 0; j < use->len(); j++) {
    Node *uin = use->in(j);
    if (uin == oldref) {
      if (j < req)
        use->set_req(j, newref);
      else
        use->set_prec(j, newref);
      nreplacements++;
    } else if (j >= req && uin == NULL) {
      break;
    }
  }
  return nreplacements;
}

void PhaseMacroExpand::copy_call_debug_info(CallNode *oldcall, CallNode * newcall) {
  // Copy debug information and adjust JVMState information
  uint old_dbg_start = oldcall->tf()->domain()->cnt();
  uint new_dbg_start = newcall->tf()->domain()->cnt();
  int jvms_adj = new_dbg_start - old_dbg_start;
  assert (new_dbg_start == newcall->req(), "argument count mismatch");

  Dict* sosn_map = new Dict(cmpkey, hashkey);
  for (uint i = old_dbg_start; i < oldcall->req(); i++) {
    Node* old_in = oldcall->in(i);
    // Clone old SafePointScalarObjectNodes, adjusting their field contents.
    if (old_in != NULL && old_in->is_SafePointScalarObject()) {
      SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
      uint old_unique = C->unique();
      Node* new_in = old_sosn->clone(jvms_adj, sosn_map);
      if (old_unique != C->unique()) {
        new_in->set_req(0, newcall->in(0)); // reset control edge
        new_in = transform_later(new_in);   // Register new node.
      }
      old_in = new_in;
    }
    newcall->add_req(old_in);
  }

  newcall->set_jvms(oldcall->jvms());
  for (JVMState *jvms = newcall->jvms(); jvms != NULL; jvms = jvms->caller()) {
    jvms->set_map(newcall);
    jvms->set_locoff(jvms->locoff() + jvms_adj);
    jvms->set_stkoff(jvms->stkoff() + jvms_adj);
    jvms->set_monoff(jvms->monoff() + jvms_adj);
    jvms->set_scloff(jvms->scloff() + jvms_adj);
    jvms->set_endoff(jvms->endoff() + jvms_adj);
  }
}

Node* PhaseMacroExpand::opt_bits_test(Node* ctrl, Node* region, int edge, Node* word, int mask, int bits, bool return_fast_path) {
  Node* cmp;
  if (mask != 0) {
    Node* and_node = transform_later(new (C, 3) AndXNode(word, MakeConX(mask)));
    cmp = transform_later(new (C, 3) CmpXNode(and_node, MakeConX(bits)));
  } else {
    cmp = word;
  }
  Node* bol = transform_later(new (C, 2) BoolNode(cmp, BoolTest::ne));
  IfNode* iff = new (C, 2) IfNode( ctrl, bol, PROB_MIN, COUNT_UNKNOWN );
  transform_later(iff);

  // Fast path taken.
  Node *fast_taken = transform_later( new (C, 1) IfFalseNode(iff) );

  // Fast path not-taken, i.e. slow path
  Node *slow_taken = transform_later( new (C, 1) IfTrueNode(iff) );

  if (return_fast_path) {
    region->init_req(edge, slow_taken); // Capture slow-control
    return fast_taken;
  } else {
    region->init_req(edge, fast_taken); // Capture fast-control
    return slow_taken;
  }
}

//--------------------copy_predefined_input_for_runtime_call--------------------
void PhaseMacroExpand::copy_predefined_input_for_runtime_call(Node * ctrl, CallNode* oldcall, CallNode* call) {
  // Set fixed predefined input arguments
  call->init_req( TypeFunc::Control, ctrl );
  call->init_req( TypeFunc::I_O    , oldcall->in( TypeFunc::I_O) );
  call->init_req( TypeFunc::Memory , oldcall->in( TypeFunc::Memory ) ); // ?????
  call->init_req( TypeFunc::ReturnAdr, oldcall->in( TypeFunc::ReturnAdr ) );
  call->init_req( TypeFunc::FramePtr, oldcall->in( TypeFunc::FramePtr ) );
}

//------------------------------make_slow_call---------------------------------
CallNode* PhaseMacroExpand::make_slow_call(CallNode *oldcall, const TypeFunc* slow_call_type, address slow_call, const char* leaf_name, Node* slow_path, Node* parm0, Node* parm1) {

  // Slow-path call
  int size = slow_call_type->domain()->cnt();
  CallNode *call = leaf_name
    ? (CallNode*)new (C, size) CallLeafNode      ( slow_call_type, slow_call, leaf_name, TypeRawPtr::BOTTOM )
    : (CallNode*)new (C, size) CallStaticJavaNode( slow_call_type, slow_call, OptoRuntime::stub_name(slow_call), oldcall->jvms()->bci(), TypeRawPtr::BOTTOM );

  // Slow path call has no side-effects, uses few values
  copy_predefined_input_for_runtime_call(slow_path, oldcall, call );
  if (parm0 != NULL)  call->init_req(TypeFunc::Parms+0, parm0);
  if (parm1 != NULL)  call->init_req(TypeFunc::Parms+1, parm1);
  copy_call_debug_info(oldcall, call);
  call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
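  // Swap the new call in for the old one: replace_node() rewires all of
  // oldcall's users (projections and debug edges) to the new call before
  // the new call itself is registered with IGVN.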
  _igvn.replace_node(oldcall, call);
  transform_later(call);

  return call;
}

void PhaseMacroExpand::extract_call_projections(CallNode *call) {
  _fallthroughproj = NULL;
  _fallthroughcatchproj = NULL;
  _ioproj_fallthrough = NULL;
  _ioproj_catchall = NULL;
  _catchallcatchproj = NULL;
  _memproj_fallthrough = NULL;
  _memproj_catchall = NULL;
  _resproj = NULL;
  for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
    ProjNode *pn = call->fast_out(i)->as_Proj();
    switch (pn->_con) {
      case TypeFunc::Control:
      {
        // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
        _fallthroughproj = pn;
        DUIterator_Fast jmax, j = pn->fast_outs(jmax);
        const Node *cn = pn->fast_out(j);
        if (cn->is_Catch()) {
          ProjNode *cpn = NULL;
          for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
            cpn = cn->fast_out(k)->as_Proj();
            assert(cpn->is_CatchProj(), "must be a CatchProjNode");
            if (cpn->_con == CatchProjNode::fall_through_index)
              _fallthroughcatchproj = cpn;
            else {
              assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
              _catchallcatchproj = cpn;
            }
          }
        }
        break;
      }
      case TypeFunc::I_O:
        if (pn->_is_io_use)
          _ioproj_catchall = pn;
        else
          _ioproj_fallthrough = pn;
        break;
      case TypeFunc::Memory:
        if (pn->_is_io_use)
          _memproj_catchall = pn;
        else
          _memproj_fallthrough = pn;
        break;
      case TypeFunc::Parms:
        _resproj = pn;
        break;
      default:
        assert(false, "unexpected projection from allocation node.");
    }
  }

}

// Eliminate a card mark sequence.  p2x is a ConvP2XNode
void PhaseMacroExpand::eliminate_card_mark(Node* p2x) {
  assert(p2x->Opcode() == Op_CastP2X, "ConvP2XNode required");
  if (!UseG1GC) {
    // vanilla/CMS post barrier
    Node *shift = p2x->unique_out();
    Node *addp = shift->unique_out();
    for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
      Node *st = addp->last_out(j);
      assert(st->is_Store(), "store required");
      _igvn.replace_node(st, st->in(MemNode::Memory));
    }
  } else {
    // G1 pre/post barriers
    assert(p2x->outcnt() == 2, "expects 2 users: Xor and URShift nodes");
    // It could have only one user, the URShift node, in the Object.clone()
    // intrinsic, but then the new allocation is passed to the arraycopy stub
    // and cannot be scalar replaced.  So we don't check that case.

    // Remove G1 post barrier.

    // Search for the CastP2X->Xor->URShift->Cmp path which checks whether the
    // store was done to a region different from the value's region, and
    // replace the Cmp with #0 (false) to collapse the G1 post barrier.
    Node* xorx = NULL;
    for (DUIterator_Fast imax, i = p2x->fast_outs(imax); i < imax; i++) {
      Node* u = p2x->fast_out(i);
      if (u->Opcode() == Op_XorX) {
        xorx = u;
        break;
      }
    }
    assert(xorx != NULL, "missing G1 post barrier");
    Node* shift = xorx->unique_out();
    Node* cmpx = shift->unique_out();
    assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
           cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
           "missing region check in G1 post barrier");
    _igvn.replace_node(cmpx, makecon(TypeInt::CC_EQ));

    // Remove G1 pre barrier.

    // Search for the "if (marking != 0)" check and set it to "false".
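    // The guard matched below has the shape
    //   Region <- IfFalse <- If <- Bool(ne) <- Cmp(Load(marking_active), #0)
    // where the load reads the thread-local SATB "active" flag; folding the
    // Cmp to "equal" makes the guarded pre-barrier code dead.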
    Node* this_region = p2x->in(0);
    assert(this_region != NULL, "");
    // There is no G1 pre barrier if the previously stored value is NULL
    // (for example, after initialization).
    if (this_region->is_Region() && this_region->req() == 3) {
      int ind = 1;
      if (!this_region->in(ind)->is_IfFalse()) {
        ind = 2;
      }
      if (this_region->in(ind)->is_IfFalse()) {
        Node* bol = this_region->in(ind)->in(0)->in(1);
        assert(bol->is_Bool(), "");
        cmpx = bol->in(1);
        if (bol->as_Bool()->_test._test == BoolTest::ne &&
            cmpx->is_Cmp() && cmpx->in(2) == intcon(0) &&
            cmpx->in(1)->is_Load()) {
          Node* adr = cmpx->in(1)->as_Load()->in(MemNode::Address);
          const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() +
                                              PtrQueue::byte_offset_of_active());
          if (adr->is_AddP() && adr->in(AddPNode::Base) == top() &&
              adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
              adr->in(AddPNode::Offset) == MakeConX(marking_offset)) {
            _igvn.replace_node(cmpx, makecon(TypeInt::CC_EQ));
          }
        }
      }
    }
    // Now the CastP2X can be removed since it is used only on a dead path,
    // which is currently still alive until igvn optimizes it away.
    assert(p2x->unique_out()->Opcode() == Op_URShiftX, "");
    _igvn.replace_node(p2x, top());
  }
}

// Search for a memory operation for the specified memory slice.
static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_mem, Node *alloc, PhaseGVN *phase) {
  Node *orig_mem = mem;
  Node *alloc_mem = alloc->in(TypeFunc::Memory);
  const TypeOopPtr *tinst = phase->C->get_adr_type(alias_idx)->isa_oopptr();
  while (true) {
    if (mem == alloc_mem || mem == start_mem ) {
      return mem;  // hit one of our sentinels
    } else if (mem->is_MergeMem()) {
      mem = mem->as_MergeMem()->memory_at(alias_idx);
    } else if (mem->is_Proj() && mem->as_Proj()->_con == TypeFunc::Memory) {
      Node *in = mem->in(0);
      // we can safely skip over safepoints, calls, locks and membars because we
      // already know that the object is safe to eliminate.
      if (in->is_Initialize() && in->as_Initialize()->allocation() == alloc) {
        return in;
      } else if (in->is_Call()) {
        CallNode *call = in->as_Call();
        assert(!call->may_modify(tinst, phase), "calls cannot modify a non-escaping object");
        mem = call->in(TypeFunc::Memory);
      } else if (in->is_MemBar()) {
        mem = in->in(TypeFunc::Memory);
      } else {
        assert(false, "unexpected projection");
      }
    } else if (mem->is_Store()) {
      const TypePtr* atype = mem->as_Store()->adr_type();
      int adr_idx = Compile::current()->get_alias_index(atype);
      if (adr_idx == alias_idx) {
        assert(atype->isa_oopptr(), "address type must be oopptr");
        int adr_offset = atype->offset();
        uint adr_iid = atype->is_oopptr()->instance_id();
        // Array element references have the same alias_idx
        // but different offset and different instance_id.
        if (adr_offset == offset && adr_iid == alloc->_idx)
          return mem;
      } else {
        assert(adr_idx == Compile::AliasIdxRaw, "address must match or be raw");
      }
      mem = mem->in(MemNode::Memory);
    } else if (mem->is_ClearArray()) {
      if (!ClearArrayNode::step_through(&mem, alloc->_idx, phase)) {
        // Can not bypass initialization of the instance
        // we are looking for.
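        // (step_through() returned false: this ClearArray zeroes the very
        // instance we are tracking, so the value must come from the
        // allocation's Initialize node or its memory input.)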
        debug_only(intptr_t offset;)
        assert(alloc == AllocateNode::Ideal_allocation(mem->in(3), phase, offset), "sanity");
        InitializeNode* init = alloc->as_Allocate()->initialization();
        // We are looking for the stored value, so return the Initialize node
        // or the memory edge from the Allocate node.
        if (init != NULL)
          return init;
        else
          return alloc->in(TypeFunc::Memory); // It will produce zero value (see callers).
      }
      // Otherwise skip it (the call updated 'mem' value).
    } else if (mem->Opcode() == Op_SCMemProj) {
      assert(mem->in(0)->is_LoadStore(), "sanity");
      const TypePtr* atype = mem->in(0)->in(MemNode::Address)->bottom_type()->is_ptr();
      int adr_idx = Compile::current()->get_alias_index(atype);
      if (adr_idx == alias_idx) {
        assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
        return NULL;
      }
      mem = mem->in(0)->in(MemNode::Memory);
    } else {
      return mem;
    }
    assert(mem != orig_mem, "dead memory loop");
  }
}

//
// Given a Memory Phi, compute a value Phi containing the values from stores
// on the input paths.
// Note: this function is recursive; its depth is limited by the "level" argument.
// Returns the computed Phi, or NULL if it cannot compute it.
Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *phi_type, const TypeOopPtr *adr_t, Node *alloc, Node_Stack *value_phis, int level) {
  assert(mem->is_Phi(), "sanity");
  int alias_idx = C->get_alias_index(adr_t);
  int offset = adr_t->offset();
  int instance_id = adr_t->instance_id();

  // Check if an appropriate value phi already exists.
  Node* region = mem->in(0);
  for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) {
    Node* phi = region->fast_out(k);
    if (phi->is_Phi() && phi != mem &&
        phi->as_Phi()->is_same_inst_field(phi_type, instance_id, alias_idx, offset)) {
      return phi;
    }
  }
  // Check if an appropriate new value phi already exists.
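  // (New value Phis created during this recursive search are recorded on the
  // value_phis stack, keyed by the memory Phi's _idx, so a cycle in the memory
  // graph reuses the Phi under construction instead of recursing forever.)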
  Node* new_phi = NULL;
  uint size = value_phis->size();
  for (uint i = 0; i < size; i++) {
    if ( mem->_idx == value_phis->index_at(i) ) {
      return value_phis->node_at(i);
    }
  }

  if (level <= 0) {
    return NULL; // Give up: phi tree too deep
  }
  Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
  Node *alloc_mem = alloc->in(TypeFunc::Memory);

  uint length = mem->req();
  GrowableArray <Node *> values(length, length, NULL);

  // create a new Phi for the value
  PhiNode *phi = new (C, length) PhiNode(mem->in(0), phi_type, NULL, instance_id, alias_idx, offset);
  transform_later(phi);
  value_phis->push(phi, mem->_idx);

  for (uint j = 1; j < length; j++) {
    Node *in = mem->in(j);
    if (in == NULL || in->is_top()) {
      values.at_put(j, in);
    } else {
      Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc, &_igvn);
      if (val == start_mem || val == alloc_mem) {
        // hit a sentinel, return appropriate 0 value
        values.at_put(j, _igvn.zerocon(ft));
        continue;
      }
      if (val->is_Initialize()) {
        val = val->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
      }
      if (val == NULL) {
        return NULL;  // can't find a value on this path
      }
      if (val == mem) {
        values.at_put(j, mem);
      } else if (val->is_Store()) {
        values.at_put(j, val->in(MemNode::ValueIn));
      } else if (val->is_Proj() && val->in(0) == alloc) {
        values.at_put(j, _igvn.zerocon(ft));
      } else if (val->is_Phi()) {
        val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, value_phis, level-1);
        if (val == NULL) {
          return NULL;
        }
        values.at_put(j, val);
      } else if (val->Opcode() == Op_SCMemProj) {
        assert(val->in(0)->is_LoadStore(), "sanity");
        assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
        return NULL;
      } else {
#ifdef ASSERT
        val->dump();
        assert(false, "unknown node on this path");
#endif
        return NULL;  // unknown node on this path
      }
    }
  }
  // Set Phi's inputs
  for (uint j = 1; j < length; j++) {
    if (values.at(j) == mem) {
      phi->init_req(j, phi);
    } else {
      phi->init_req(j, values.at(j));
    }
  }
  return phi;
}

// Search the last value stored into the object's field.
Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, Node *alloc) {
  assert(adr_t->is_known_instance_field(), "instance required");
  int instance_id = adr_t->instance_id();
  assert((uint)instance_id == alloc->_idx, "wrong allocation");

  int alias_idx = C->get_alias_index(adr_t);
  int offset = adr_t->offset();
  Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
  Node *alloc_ctrl = alloc->in(TypeFunc::Control);
  Node *alloc_mem = alloc->in(TypeFunc::Memory);
  Arena *a = Thread::current()->resource_area();
  VectorSet visited(a);


  bool done = sfpt_mem == alloc_mem;
  Node *mem = sfpt_mem;
  while (!done) {
    if (visited.test_set(mem->_idx)) {
      return NULL;  // found a loop, give up
    }
    mem = scan_mem_chain(mem, alias_idx, offset, start_mem, alloc, &_igvn);
    if (mem == start_mem || mem == alloc_mem) {
      done = true;  // hit a sentinel, return appropriate 0 value
    } else if (mem->is_Initialize()) {
      mem = mem->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
      if (mem == NULL) {
        done = true; // Something went wrong.
      } else if (mem->is_Store()) {
        const TypePtr* atype = mem->as_Store()->adr_type();
        assert(C->get_alias_index(atype) == Compile::AliasIdxRaw, "store is correct memory slice");
        done = true;
      }
    } else if (mem->is_Store()) {
      const TypeOopPtr* atype = mem->as_Store()->adr_type()->isa_oopptr();
      assert(atype != NULL, "address type must be oopptr");
      assert(C->get_alias_index(atype) == alias_idx &&
             atype->is_known_instance_field() && atype->offset() == offset &&
             atype->instance_id() == instance_id, "store is correct memory slice");
      done = true;
    } else if (mem->is_Phi()) {
      // try to find a phi's unique input
      Node *unique_input = NULL;
      Node *top = C->top();
      for (uint i = 1; i < mem->req(); i++) {
        Node *n = scan_mem_chain(mem->in(i), alias_idx, offset, start_mem, alloc, &_igvn);
        if (n == NULL || n == top || n == mem) {
          continue;
        } else if (unique_input == NULL) {
          unique_input = n;
        } else if (unique_input != n) {
          unique_input = top;
          break;
        }
      }
      if (unique_input != NULL && unique_input != top) {
        mem = unique_input;
      } else {
        done = true;
      }
    } else {
      assert(false, "unexpected node");
    }
  }
  if (mem != NULL) {
    if (mem == start_mem || mem == alloc_mem) {
      // hit a sentinel, return appropriate 0 value
      return _igvn.zerocon(ft);
    } else if (mem->is_Store()) {
      return mem->in(MemNode::ValueIn);
    } else if (mem->is_Phi()) {
      // attempt to produce a Phi reflecting the values on the input paths of the Phi
      Node_Stack value_phis(a, 8);
      Node* phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, &value_phis, ValueSearchLimit);
      if (phi != NULL) {
        return phi;
      } else {
        // Kill all new Phis
        while (value_phis.is_nonempty()) {
          Node* n = value_phis.node();
          _igvn.replace_node(n, C->top());
          value_phis.pop();
        }
      }
    }
  }
  // Something went wrong.
  return NULL;
}

// Check the possibility of scalar replacement.
bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) {
  // Scan the uses of the allocation to check for anything that would
  // prevent us from eliminating it.
  NOT_PRODUCT( const char* fail_eliminate = NULL; )
  DEBUG_ONLY( Node* disq_node = NULL; )
  bool can_eliminate = true;

  Node* res = alloc->result_cast();
  const TypeOopPtr* res_type = NULL;
  if (res == NULL) {
    // All users were eliminated.
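    // Nothing disqualifies the allocation; it can simply be removed.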
  } else if (!res->is_CheckCastPP()) {
    alloc->_is_scalar_replaceable = false;  // don't try again
    NOT_PRODUCT(fail_eliminate = "Allocation does not have unique CheckCastPP";)
    can_eliminate = false;
  } else {
    res_type = _igvn.type(res)->isa_oopptr();
    if (res_type == NULL) {
      NOT_PRODUCT(fail_eliminate = "Neither instance nor array allocation";)
      can_eliminate = false;
    } else if (res_type->isa_aryptr()) {
      int length = alloc->in(AllocateNode::ALength)->find_int_con(-1);
      if (length < 0) {
        NOT_PRODUCT(fail_eliminate = "Array's size is not constant";)
        can_eliminate = false;
      }
    }
  }

  if (can_eliminate && res != NULL) {
    for (DUIterator_Fast jmax, j = res->fast_outs(jmax);
                               j < jmax && can_eliminate; j++) {
      Node* use = res->fast_out(j);

      if (use->is_AddP()) {
        const TypePtr* addp_type = _igvn.type(use)->is_ptr();
        int offset = addp_type->offset();

        if (offset == Type::OffsetTop || offset == Type::OffsetBot) {
          NOT_PRODUCT(fail_eliminate = "Undefined field reference";)
          can_eliminate = false;
          break;
        }
        for (DUIterator_Fast kmax, k = use->fast_outs(kmax);
                                   k < kmax && can_eliminate; k++) {
          Node* n = use->fast_out(k);
          if (!n->is_Store() && n->Opcode() != Op_CastP2X) {
            DEBUG_ONLY(disq_node = n;)
            if (n->is_Load() || n->is_LoadStore()) {
              NOT_PRODUCT(fail_eliminate = "Field load";)
            } else {
              NOT_PRODUCT(fail_eliminate = "Not store field reference";)
            }
            can_eliminate = false;
          }
        }
      } else if (use->is_SafePoint()) {
        SafePointNode* sfpt = use->as_SafePoint();
        if (sfpt->is_Call() && sfpt->as_Call()->has_non_debug_use(res)) {
          // Object is passed as argument.
          DEBUG_ONLY(disq_node = use;)
          NOT_PRODUCT(fail_eliminate = "Object is passed as argument";)
          can_eliminate = false;
        }
        Node* sfptMem = sfpt->memory();
        if (sfptMem == NULL || sfptMem->is_top()) {
          DEBUG_ONLY(disq_node = use;)
          NOT_PRODUCT(fail_eliminate = "NULL or TOP memory";)
          can_eliminate = false;
        } else {
          safepoints.append_if_missing(sfpt);
        }
      } else if (use->Opcode() != Op_CastP2X) { // CastP2X is used by card mark
        if (use->is_Phi()) {
          if (use->outcnt() == 1 && use->unique_out()->Opcode() == Op_Return) {
            NOT_PRODUCT(fail_eliminate = "Object is return value";)
          } else {
            NOT_PRODUCT(fail_eliminate = "Object is referenced by Phi";)
          }
          DEBUG_ONLY(disq_node = use;)
        } else {
          if (use->Opcode() == Op_Return) {
            NOT_PRODUCT(fail_eliminate = "Object is return value";)
          } else {
            NOT_PRODUCT(fail_eliminate = "Object is referenced by node";)
          }
          DEBUG_ONLY(disq_node = use;)
        }
        can_eliminate = false;
      }
    }
  }

#ifndef PRODUCT
  if (PrintEliminateAllocations) {
    if (can_eliminate) {
      tty->print("Scalar ");
      if (res == NULL)
        alloc->dump();
      else
        res->dump();
    } else {
      tty->print("NotScalar (%s)", fail_eliminate);
      if (res == NULL)
        alloc->dump();
      else
        res->dump();
#ifdef ASSERT
      if (disq_node != NULL) {
        tty->print("  >>>>  ");
        disq_node->dump();
      }
#endif /*ASSERT*/
    }
  }
#endif
  return can_eliminate;
}

// Do scalar replacement.
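// For each safepoint that can observe the allocation, the object reference in
// the debug info is replaced by a SafePointScalarObjectNode plus one extra
// debug edge per field value, so deoptimization can rematerialize the object.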
bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) {
  GrowableArray <SafePointNode *> safepoints_done;

  ciKlass* klass = NULL;
  ciInstanceKlass* iklass = NULL;
  int nfields = 0;
  int array_base;
  int element_size;
  BasicType basic_elem_type;
  ciType* elem_type;

  Node* res = alloc->result_cast();
  const TypeOopPtr* res_type = NULL;
  if (res != NULL) { // Could be NULL when there are no users
    res_type = _igvn.type(res)->isa_oopptr();
  }

  if (res != NULL) {
    klass = res_type->klass();
    if (res_type->isa_instptr()) {
      // find the fields of the class which will be needed for safepoint debug information
      assert(klass->is_instance_klass(), "must be an instance klass.");
      iklass = klass->as_instance_klass();
      nfields = iklass->nof_nonstatic_fields();
    } else {
      // find the array's elements which will be needed for safepoint debug information
      nfields = alloc->in(AllocateNode::ALength)->find_int_con(-1);
      assert(klass->is_array_klass() && nfields >= 0, "must be an array klass.");
      elem_type = klass->as_array_klass()->element_type();
      basic_elem_type = elem_type->basic_type();
      array_base = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
      element_size = type2aelembytes(basic_elem_type);
    }
  }
  //
  // Process the safepoint uses
  //
  while (safepoints.length() > 0) {
    SafePointNode* sfpt = safepoints.pop();
    Node* mem = sfpt->memory();
    uint first_ind = sfpt->req();
    SafePointScalarObjectNode* sobj = new (C, 1) SafePointScalarObjectNode(res_type,
#ifdef ASSERT
                                                 alloc,
#endif
                                                 first_ind, nfields);
    sobj->init_req(0, sfpt->in(TypeFunc::Control));
    transform_later(sobj);

    // Scan object's fields adding an input to the safepoint for each field.
    for (int j = 0; j < nfields; j++) {
      intptr_t offset;
      ciField* field = NULL;
      if (iklass != NULL) {
        field = iklass->nonstatic_field_at(j);
        offset = field->offset();
        elem_type = field->type();
        basic_elem_type = field->layout_type();
      } else {
        offset = array_base + j * (intptr_t)element_size;
      }

      const Type *field_type;
      // The next code is taken from Parse::do_get_xxx().
      if (basic_elem_type == T_OBJECT || basic_elem_type == T_ARRAY) {
        if (!elem_type->is_loaded()) {
          field_type = TypeInstPtr::BOTTOM;
        } else if (field != NULL && field->is_constant() && field->is_static()) {
          // This can happen if the constant oop is non-perm.
          ciObject* con = field->constant_value().as_object();
          // Do not "join" in the previous type; it doesn't add value,
          // and may yield a vacuous result if the field is of interface type.
          field_type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
          assert(field_type != NULL, "field singleton type must be consistent");
        } else {
          field_type = TypeOopPtr::make_from_klass(elem_type->as_klass());
        }
        if (UseCompressedOops) {
          field_type = field_type->make_narrowoop();
          basic_elem_type = T_NARROWOOP;
        }
      } else {
        field_type = Type::get_const_basic_type(basic_elem_type);
      }

      const TypeOopPtr *field_addr_type = res_type->add_offset(offset)->isa_oopptr();

      Node *field_val = value_from_mem(mem, basic_elem_type, field_type, field_addr_type, alloc);
      if (field_val == NULL) {
        // We weren't able to find a value for this field,
        // give up on eliminating this allocation.
        alloc->_is_scalar_replaceable = false;  // don't try again
        // remove any extra entries we added to the safepoint
        uint last = sfpt->req() - 1;
        for (int k = 0; k < j; k++) {
          sfpt->del_req(last--);
        }
        // rollback processed safepoints
        while (safepoints_done.length() > 0) {
          SafePointNode* sfpt_done = safepoints_done.pop();
          // remove any extra entries we added to the safepoint
          last = sfpt_done->req() - 1;
          for (int k = 0; k < nfields; k++) {
            sfpt_done->del_req(last--);
          }
          JVMState *jvms = sfpt_done->jvms();
          jvms->set_endoff(sfpt_done->req());
          // Now make a pass over the debug information replacing any references
          // to SafePointScalarObjectNode with the allocated object.
          int start = jvms->debug_start();
          int end   = jvms->debug_end();
          for (int i = start; i < end; i++) {
            if (sfpt_done->in(i)->is_SafePointScalarObject()) {
              SafePointScalarObjectNode* scobj = sfpt_done->in(i)->as_SafePointScalarObject();
              if (scobj->first_index() == sfpt_done->req() &&
                  scobj->n_fields() == (uint)nfields) {
                assert(scobj->alloc() == alloc, "sanity");
                sfpt_done->set_req(i, res);
              }
            }
          }
        }
#ifndef PRODUCT
        if (PrintEliminateAllocations) {
          if (field != NULL) {
            tty->print("=== At SafePoint node %d can't find value of Field: ",
                       sfpt->_idx);
            field->print();
            int field_idx = C->get_alias_index(field_addr_type);
            tty->print(" (alias_idx=%d)", field_idx);
          } else { // Array's element
            tty->print("=== At SafePoint node %d can't find value of array element [%d]",
                       sfpt->_idx, j);
          }
          tty->print(", which prevents elimination of: ");
          if (res == NULL)
            alloc->dump();
          else
            res->dump();
        }
#endif
        return false;
      }
      if (UseCompressedOops && field_type->isa_narrowoop()) {
        // Enable "DecodeN(EncodeP(Allocate)) --> Allocate" transformation
        // to be able to scalar replace the allocation.
        if (field_val->is_EncodeP()) {
          field_val = field_val->in(1);
        } else {
          field_val = transform_later(new (C, 2) DecodeNNode(field_val, field_val->bottom_type()->make_ptr()));
        }
      }
      sfpt->add_req(field_val);
    }
    JVMState *jvms = sfpt->jvms();
    jvms->set_endoff(sfpt->req());
    // Now make a pass over the debug information replacing any references
    // to the allocated object with "sobj"
    int start = jvms->debug_start();
    int end   = jvms->debug_end();
    for (int i = start; i < end; i++) {
      if (sfpt->in(i) == res) {
        sfpt->set_req(i, sobj);
      }
    }
    safepoints_done.append_if_missing(sfpt); // keep it for rollback
  }
  return true;
}

// Process users of eliminated allocation.
void PhaseMacroExpand::process_users_of_allocation(AllocateNode *alloc) {
  Node* res = alloc->result_cast();
  if (res != NULL) {
    for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) {
      Node *use = res->last_out(j);
      uint oc1 = res->outcnt();

      if (use->is_AddP()) {
        for (DUIterator_Last kmin, k = use->last_outs(kmin); k >= kmin; ) {
          Node *n = use->last_out(k);
          uint oc2 = use->outcnt();
          if (n->is_Store()) {
#ifdef ASSERT
            // Verify that there are no dependent MemBarVolatile nodes;
            // they should be removed during IGVN, see MemBarNode::Ideal().
            for (DUIterator_Fast pmax, p = n->fast_outs(pmax);
                                       p < pmax; p++) {
              Node* mb = n->fast_out(p);
              assert(mb->is_Initialize() || !mb->is_MemBar() ||
                     mb->req() <= MemBarNode::Precedent ||
                     mb->in(MemBarNode::Precedent) != n,
                     "MemBarVolatile should be eliminated for non-escaping object");
            }
#endif
            _igvn.replace_node(n, n->in(MemNode::Memory));
          } else {
            eliminate_card_mark(n);
          }
          k -= (oc2 - use->outcnt());
        }
      } else {
        eliminate_card_mark(use);
      }
      j -= (oc1 - res->outcnt());
    }
    assert(res->outcnt() == 0, "all uses of allocated objects must be deleted");
    _igvn.remove_dead_node(res);
  }

  //
  // Process other users of allocation's projections
  //
  if (_resproj != NULL && _resproj->outcnt() != 0) {
    for (DUIterator_Last jmin, j = _resproj->last_outs(jmin); j >= jmin; ) {
      Node *use = _resproj->last_out(j);
      uint oc1 = _resproj->outcnt();
      if (use->is_Initialize()) {
        // Eliminate Initialize node.
        InitializeNode *init = use->as_Initialize();
        assert(init->outcnt() <= 2, "only a control and memory projection expected");
        Node *ctrl_proj = init->proj_out(TypeFunc::Control);
        if (ctrl_proj != NULL) {
          assert(init->in(TypeFunc::Control) == _fallthroughcatchproj, "allocation control projection");
          _igvn.replace_node(ctrl_proj, _fallthroughcatchproj);
        }
        Node *mem_proj = init->proj_out(TypeFunc::Memory);
        if (mem_proj != NULL) {
          Node *mem = init->in(TypeFunc::Memory);
#ifdef ASSERT
          if (mem->is_MergeMem()) {
            assert(mem->in(TypeFunc::Memory) == _memproj_fallthrough, "allocation memory projection");
          } else {
            assert(mem == _memproj_fallthrough, "allocation memory projection");
          }
#endif
          _igvn.replace_node(mem_proj, mem);
        }
      } else if (use->is_AddP()) {
        // raw memory addresses used only by the initialization
        _igvn.replace_node(use, C->top());
      } else {
        assert(false, "only Initialize or AddP expected");
      }
      j -= (oc1 - _resproj->outcnt());
    }
  }
  if (_fallthroughcatchproj != NULL) {
    _igvn.replace_node(_fallthroughcatchproj, alloc->in(TypeFunc::Control));
  }
  if (_memproj_fallthrough != NULL) {
    _igvn.replace_node(_memproj_fallthrough, alloc->in(TypeFunc::Memory));
  }
  if (_memproj_catchall != NULL) {
    _igvn.replace_node(_memproj_catchall, C->top());
  }
  if (_ioproj_fallthrough != NULL) {
    _igvn.replace_node(_ioproj_fallthrough, alloc->in(TypeFunc::I_O));
  }
  if (_ioproj_catchall != NULL) {
    _igvn.replace_node(_ioproj_catchall, C->top());
  }
  if (_catchallcatchproj != NULL) {
    _igvn.replace_node(_catchallcatchproj, C->top());
  }
}

bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {

  if (!EliminateAllocations || !alloc->_is_scalar_replaceable) {
    return false;
  }

  extract_call_projections(alloc);

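  // Collect the safepoints that reference the allocation result; any
  // disqualifying use makes can_eliminate_allocation() report the reason
  // (under PrintEliminateAllocations) and bail out.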
  GrowableArray <SafePointNode *> safepoints;
  if (!can_eliminate_allocation(alloc, safepoints)) {
    return false;
  }

  if (!scalar_replacement(alloc, safepoints)) {
    return false;
  }

  CompileLog* log = C->log();
  if (log != NULL) {
    Node* klass = alloc->in(AllocateNode::KlassNode);
    const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
    log->head("eliminate_allocation type='%d'",
              log->identify(tklass->klass()));
    JVMState* p = alloc->jvms();
    while (p != NULL) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("eliminate_allocation");
  }

  process_users_of_allocation(alloc);

#ifndef PRODUCT
  if (PrintEliminateAllocations) {
    if (alloc->is_AllocateArray())
      tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
    else
      tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
  }
#endif

  return true;
}


//---------------------------set_eden_pointers-------------------------
void PhaseMacroExpand::set_eden_pointers(Node* &eden_top_adr, Node* &eden_end_adr) {
  if (UseTLAB) {                // Private allocation: load from TLS
    Node* thread = transform_later(new (C, 1) ThreadLocalNode());
    int tlab_top_offset = in_bytes(JavaThread::tlab_top_offset());
    int tlab_end_offset = in_bytes(JavaThread::tlab_end_offset());
    eden_top_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_top_offset);
    eden_end_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_end_offset);
  } else {                      // Shared allocation: load from globals
    CollectedHeap* ch = Universe::heap();
    address top_adr = (address)ch->top_addr();
    address end_adr = (address)ch->end_addr();
    eden_top_adr = makecon(TypeRawPtr::make(top_adr));
    eden_end_adr = basic_plus_adr(eden_top_adr, end_adr - top_adr);
  }
}


Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) {
  Node* adr = basic_plus_adr(base, offset);
  const TypePtr* adr_type = adr->bottom_type()->is_ptr();
  Node* value = LoadNode::make(_igvn, ctl, mem, adr, adr_type, value_type, bt);
  transform_later(value);
  return value;
}


Node* PhaseMacroExpand::make_store(Node* ctl, Node* mem, Node* base, int offset, Node* value, BasicType bt) {
  Node* adr = basic_plus_adr(base, offset);
  mem = StoreNode::make(_igvn, ctl, mem, adr, NULL, value, bt);
  transform_later(mem);
  return mem;
}

//=============================================================================
//
//                              A L L O C A T I O N
//
// Allocation attempts to be fast in the case of frequent small objects.
// It breaks down like this:
//
// 1) Size in doublewords is computed.  This is a constant for objects and
// variable for most arrays.  Doubleword units are used to avoid size
// overflow of huge doubleword arrays.  We need doublewords in the end for
// rounding.
//
// 2) Size is checked for being 'too large'.  Too-large allocations will go
// the slow path into the VM.  The slow path can throw any required
// exceptions, and does all the special checks for very large arrays.  The
// size test can constant-fold away for objects.  For objects with
// finalizers it constant-folds the other way: you always go slow with
// finalizers.
//
// 3) If NOT using TLABs, this is the contended loop-back point.
// Load-Locked the heap top.  If using TLABs normal-load the heap top.
//
// 4) Check that heap top + size*8 < max.  If we fail, go the slow route.
// NOTE: "top+size*8" cannot wrap the 4Gig line!  Here's why: for largish
// "size*8" we always enter the VM, where "largish" is a constant picked small
// enough that there's always space between the eden max and 4Gig (old space is
// there so it's quite large) and large enough that the cost of entering the VM
// is dwarfed by the cost to initialize the space.
//
// 5) If NOT using TLABs, Store-Conditional the adjusted heap top back
// down.  If contended, repeat at step 3.  If using TLABs normal-store
// adjusted heap top back down; there is no contention.
//
// 6) If !ZeroTLAB then Bulk-clear the object/array.  Fill in klass & mark
// fields.
//
// 7) Merge with the slow-path; cast the raw memory pointer to the correct
// oop flavor.
//
//=============================================================================
// FastAllocateSizeLimit value is in DOUBLEWORDS.
// Allocations bigger than this always go the slow route.
// This value must be small enough that allocation attempts that need to
// trigger exceptions go the slow route.  Also, it must be small enough so
// that heap_top + size_in_bytes does not wrap around the 4Gig limit.
//=============================================================================
//
// %%% Here is an old comment from parseHelper.cpp; is it outdated?
// The allocator will coalesce int->oop copies away.  See comment in
// coalesce.cpp about how this works.  It depends critically on the exact
// code shape produced here, so if you are changing this code shape
// make sure the GC info for the heap-top is correct in and around the
// slow-path call.
//

void PhaseMacroExpand::expand_allocate_common(
            AllocateNode* alloc, // allocation node to be expanded
            Node* length,  // array length for an array allocation
            const TypeFunc* slow_call_type, // Type of slow call
            address slow_call_address  // Address of slow call
    )
{

  Node* ctrl = alloc->in(TypeFunc::Control);
  Node* mem  = alloc->in(TypeFunc::Memory);
  Node* i_o  = alloc->in(TypeFunc::I_O);
  Node* size_in_bytes     = alloc->in(AllocateNode::AllocSize);
  Node* klass_node        = alloc->in(AllocateNode::KlassNode);
  Node* initial_slow_test = alloc->in(AllocateNode::InitialTest);

  assert(ctrl != NULL, "must have control");
  // We need a Region and corresponding Phi's to merge the slow-path and fast-path results.
  // They will not be used if "always_slow" is set.
  enum { slow_result_path = 1, fast_result_path = 2 };
  Node *result_region;
  Node *result_phi_rawmem;
  Node *result_phi_rawoop;
  Node *result_phi_i_o;

  // The initial slow comparison is a size check, the comparison
  // we want to do is a BoolTest::gt
  bool always_slow = false;
  int tv = _igvn.find_int_con(initial_slow_test, -1);
  if (tv >= 0) {
    always_slow = (tv == 1);
    initial_slow_test = NULL;
  } else {
    initial_slow_test = BoolNode::make_predicate(initial_slow_test, &_igvn);
  }

  if (C->env()->dtrace_alloc_probes() ||
      (!UseTLAB && (!Universe::heap()->supports_inline_contig_alloc() ||
                    (UseConcMarkSweepGC && CMSIncrementalMode)))) {
    // Force slow-path allocation
    always_slow = true;
    initial_slow_test = NULL;
  }


  enum { too_big_or_final_path = 1, need_gc_path = 2 };
  Node *slow_region = NULL;
  Node *toobig_false = ctrl;

  assert (initial_slow_test == NULL || !always_slow, "arguments must be consistent");
  // generate the initial test if necessary
  if (initial_slow_test != NULL ) {
    slow_region = new (C, 3) RegionNode(3);

    // Now make the initial failure test.  Usually a too-big test but
    // might be a TRUE for finalizers or a fancy class check for
    // newInstance0.
    IfNode *toobig_iff = new (C, 2) IfNode(ctrl, initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
    transform_later(toobig_iff);
    // Plug the failing-too-big test into the slow-path region
    Node *toobig_true = new (C, 1) IfTrueNode( toobig_iff );
    transform_later(toobig_true);
    slow_region    ->init_req( too_big_or_final_path, toobig_true );
    toobig_false = new (C, 1) IfFalseNode( toobig_iff );
    transform_later(toobig_false);
  } else {         // No initial test, just fall into next case
    toobig_false = ctrl;
    debug_only(slow_region = NodeSentinel);
  }

  Node *slow_mem = mem;  // save the current memory state for slow path
  // generate the fast allocation code unless we know that the initial test will always go slow
  if (!always_slow) {
    // Fast path modifies only raw memory.
    if (mem->is_MergeMem()) {
      mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
    }

    Node* eden_top_adr;
    Node* eden_end_adr;

    set_eden_pointers(eden_top_adr, eden_end_adr);

    // Load Eden::end.  Loop invariant and hoisted.
    //
    // Note: We set the control input on "eden_end" and "old_eden_top" when using
    //       a TLAB to work around a bug where these values were being moved across
    //       a safepoint.  These are not oops, so they cannot be included in the oop
    //       map, but they can be changed by a GC.  The proper way to fix this would
    //       be to set the raw memory state when generating a SafepointNode.  However
    //       this will require extensive changes to the loop optimization in order to
    //       prevent a degradation of the optimization.
    //       See comment in memnode.hpp, around line 227 in class LoadPNode.
    Node *eden_end = make_load(ctrl, mem, eden_end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS);

    // allocate the Region and Phi nodes for the result
    result_region = new (C, 3) RegionNode(3);
    result_phi_rawmem = new (C, 3) PhiNode( result_region, Type::MEMORY, TypeRawPtr::BOTTOM );
    result_phi_rawoop = new (C, 3) PhiNode( result_region, TypeRawPtr::BOTTOM );
    result_phi_i_o    = new (C, 3) PhiNode( result_region, Type::ABIO ); // I/O is used for Prefetch

    // We need a Region for the loop-back contended case.
    enum { fall_in_path = 1, contended_loopback_path = 2 };
    Node *contended_region;
    Node *contended_phi_rawmem;
    if( UseTLAB ) {
      contended_region = toobig_false;
      contended_phi_rawmem = mem;
    } else {
      contended_region = new (C, 3) RegionNode(3);
      contended_phi_rawmem = new (C, 3) PhiNode( contended_region, Type::MEMORY, TypeRawPtr::BOTTOM);
      // Now handle the passing-too-big test.  We fall into the contended
      // loop-back merge point.
      contended_region    ->init_req( fall_in_path, toobig_false );
      contended_phi_rawmem->init_req( fall_in_path, mem );
      transform_later(contended_region);
      transform_later(contended_phi_rawmem);
    }

    // Load(-locked) the heap top.
    // See note above concerning the control input when using a TLAB
    Node *old_eden_top = UseTLAB
      ? new (C, 3) LoadPNode      ( ctrl, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM )
      : new (C, 3) LoadPLockedNode( contended_region, contended_phi_rawmem, eden_top_adr );

    transform_later(old_eden_top);
    // Add to heap top to get a new heap top
    Node *new_eden_top = new (C, 4) AddPNode( top(), old_eden_top, size_in_bytes );
    transform_later(new_eden_top);
    // Check for needing a GC; compare against heap end
    Node *needgc_cmp = new (C, 3) CmpPNode( new_eden_top, eden_end );
    transform_later(needgc_cmp);
    Node *needgc_bol = new (C, 2) BoolNode( needgc_cmp, BoolTest::ge );
    transform_later(needgc_bol);
    IfNode *needgc_iff = new (C, 2) IfNode(contended_region, needgc_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN );
    transform_later(needgc_iff);

    // Plug the failing-heap-space-need-gc test into the slow-path region
    Node *needgc_true = new (C, 1) IfTrueNode( needgc_iff );
    transform_later(needgc_true);
    if( initial_slow_test ) {
      slow_region    ->init_req( need_gc_path, needgc_true );
      // This completes all paths into the slow merge point
      transform_later(slow_region);
    } else {                      // No initial slow path needed!
      // Just fall from the need-GC path straight into the VM call.
      slow_region    = needgc_true;
    }
    // No need for a GC.  Setup for the Store-Conditional
    Node *needgc_false = new (C, 1) IfFalseNode( needgc_iff );
    transform_later(needgc_false);

    // Grab regular I/O before optional prefetch may change it.
    // Slow-path does no I/O so just set it to the original I/O.
    result_phi_i_o->init_req( slow_result_path, i_o );

    i_o = prefetch_allocation(i_o, needgc_false, contended_phi_rawmem,
                              old_eden_top, new_eden_top, length);

    // Store (-conditional) the modified eden top back down.
    // StorePConditional produces flags for a test PLUS a modified raw
    // memory state.
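    // With TLABs the heap-top store cannot be contended (the TLAB is
    // thread-private), so a plain StoreP suffices; otherwise the
    // StorePConditional is used and a failed store loops back to
    // re-read the heap top.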
    Node *store_eden_top;
    Node *fast_oop_ctrl;
    if( UseTLAB ) {
      store_eden_top = new (C, 4) StorePNode( needgc_false, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, new_eden_top );
      transform_later(store_eden_top);
      fast_oop_ctrl = needgc_false; // No contention, so this is the fast path
    } else {
      store_eden_top = new (C, 5) StorePConditionalNode( needgc_false, contended_phi_rawmem, eden_top_adr, new_eden_top, old_eden_top );
      transform_later(store_eden_top);
      Node *contention_check = new (C, 2) BoolNode( store_eden_top, BoolTest::ne );
      transform_later(contention_check);
      store_eden_top = new (C, 1) SCMemProjNode(store_eden_top);
      transform_later(store_eden_top);

      // If not using TLABs, check to see if there was contention.
      IfNode *contention_iff = new (C, 2) IfNode ( needgc_false, contention_check, PROB_MIN, COUNT_UNKNOWN );
      transform_later(contention_iff);
      Node *contention_true = new (C, 1) IfTrueNode( contention_iff );
      transform_later(contention_true);
      // If contention, loopback and try again.
      contended_region->init_req( contended_loopback_path, contention_true );
      contended_phi_rawmem->init_req( contended_loopback_path, store_eden_top );

      // Fast-path succeeded with no contention!
      Node *contention_false = new (C, 1) IfFalseNode( contention_iff );
      transform_later(contention_false);
      fast_oop_ctrl = contention_false;
    }

    // Rename successful fast-path variables to make meaning more obvious
    Node* fast_oop        = old_eden_top;
    Node* fast_oop_rawmem = store_eden_top;
    fast_oop_rawmem = initialize_object(alloc,
                                        fast_oop_ctrl, fast_oop_rawmem, fast_oop,
                                        klass_node, length, size_in_bytes);

    if (C->env()->dtrace_extended_probes()) {
      // Slow-path call
      int size = TypeFunc::Parms + 2;
      CallLeafNode *call = new (C, size) CallLeafNode(OptoRuntime::dtrace_object_alloc_Type(),
                                                      CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc_base),
                                                      "dtrace_object_alloc",
                                                      TypeRawPtr::BOTTOM);

      // Get base of thread-local storage area
      Node* thread = new (C, 1) ThreadLocalNode();
      transform_later(thread);

      call->init_req(TypeFunc::Parms+0, thread);
      call->init_req(TypeFunc::Parms+1, fast_oop);
      call->init_req( TypeFunc::Control, fast_oop_ctrl );
      call->init_req( TypeFunc::I_O    , top() ); // does no i/o
      call->init_req( TypeFunc::Memory , fast_oop_rawmem );
      call->init_req( TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr) );
      call->init_req( TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr) );
      transform_later(call);
      fast_oop_ctrl = new (C, 1) ProjNode(call,TypeFunc::Control);
      transform_later(fast_oop_ctrl);
      fast_oop_rawmem = new (C, 1) ProjNode(call,TypeFunc::Memory);
      transform_later(fast_oop_rawmem);
    }

    // Plug in the successful fast-path into the result merge point
    result_region    ->init_req( fast_result_path, fast_oop_ctrl );
    result_phi_rawoop->init_req( fast_result_path, fast_oop );
    result_phi_i_o   ->init_req( fast_result_path, i_o );
    result_phi_rawmem->init_req( fast_result_path, fast_oop_rawmem );
  } else {
    slow_region = ctrl;
  }

  // Generate slow-path call
  CallNode *call = new (C, slow_call_type->domain()->cnt())
    CallStaticJavaNode(slow_call_type, slow_call_address,
                       OptoRuntime::stub_name(slow_call_address),
                       alloc->jvms()->bci(),
                       TypePtr::BOTTOM);
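  // Wire the predefined inputs: control comes from the merged slow_region,
  // memory is the full pre-allocation state (the runtime call may GC), and
  // I_O is top since the stub performs no Java-visible I/O.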
  call->init_req( TypeFunc::Control, slow_region );
  call->init_req( TypeFunc::I_O    , top() );    // does no i/o
  call->init_req( TypeFunc::Memory , slow_mem ); // may gc ptrs
  call->init_req( TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr) );
  call->init_req( TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr) );

  call->init_req(TypeFunc::Parms+0, klass_node);
  if (length != NULL) {
    call->init_req(TypeFunc::Parms+1, length);
  }

  // Copy debug information and adjust JVMState information, then replace the
  // allocate node with the call
  copy_call_debug_info((CallNode *) alloc, call);
  if (!always_slow) {
    call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
  }
  _igvn.replace_node(alloc, call);
  transform_later(call);

  // Identify the output projections from the allocate node and
  // adjust any references to them.
  // The control and io projections look like:
  //
  //        v---Proj(ctrl) <-----+   v---CatchProj(ctrl)
  //  Allocate                   Catch
  //        ^---Proj(io) <-------+   ^---CatchProj(io)
  //
  //  We are interested in the CatchProj nodes.
  //
  extract_call_projections(call);

  // An allocate node has separate memory projections for the uses on the
  // control and i_o paths.  Replace uses of the control memory projection
  // with result_phi_rawmem (unless we are only generating a slow call).
  if (!always_slow && _memproj_fallthrough != NULL) {
    for (DUIterator_Fast imax, i = _memproj_fallthrough->fast_outs(imax); i < imax; i++) {
      Node *use = _memproj_fallthrough->fast_out(i);
      _igvn.hash_delete(use);
      imax -= replace_input(use, _memproj_fallthrough, result_phi_rawmem);
      _igvn._worklist.push(use);
      // back up iterator
      --i;
    }
  }
  // Now change uses of _memproj_catchall to use _memproj_fallthrough and delete
  // _memproj_catchall so we end up with a call that has only 1 memory projection.
  if (_memproj_catchall != NULL ) {
    if (_memproj_fallthrough == NULL) {
      _memproj_fallthrough = new (C, 1) ProjNode(call, TypeFunc::Memory);
      transform_later(_memproj_fallthrough);
    }
    for (DUIterator_Fast imax, i = _memproj_catchall->fast_outs(imax); i < imax; i++) {
      Node *use = _memproj_catchall->fast_out(i);
      _igvn.hash_delete(use);
      imax -= replace_input(use, _memproj_catchall, _memproj_fallthrough);
      _igvn._worklist.push(use);
      // back up iterator
      --i;
    }
  }

  // An allocate node has separate i_o projections for the uses on the control
  // and i_o paths.  Replace uses of the control i_o projection with
  // result_phi_i_o (unless we are only generating a slow call).
  if (_ioproj_fallthrough == NULL) {
    _ioproj_fallthrough = new (C, 1) ProjNode(call, TypeFunc::I_O);
    transform_later(_ioproj_fallthrough);
  } else if (!always_slow) {
    for (DUIterator_Fast imax, i = _ioproj_fallthrough->fast_outs(imax); i < imax; i++) {
      Node *use = _ioproj_fallthrough->fast_out(i);

      _igvn.hash_delete(use);
      imax -= replace_input(use, _ioproj_fallthrough, result_phi_i_o);
      _igvn._worklist.push(use);
      // back up iterator
      --i;
    }
  }
  // Now change uses of _ioproj_catchall to use _ioproj_fallthrough and delete
  // _ioproj_catchall so we end up with a call that has only 1 i_o projection.
  if (_ioproj_catchall != NULL ) {
    for (DUIterator_Fast imax, i = _ioproj_catchall->fast_outs(imax); i < imax; i++) {
      Node *use = _ioproj_catchall->fast_out(i);
      _igvn.hash_delete(use);
      imax -= replace_input(use, _ioproj_catchall, _ioproj_fallthrough);
      _igvn._worklist.push(use);
      // back up iterator
      --i;
    }
  }

  // if we generated only a slow call, we are done
  if (always_slow)
    return;


  if (_fallthroughcatchproj != NULL) {
    ctrl = _fallthroughcatchproj->clone();
    transform_later(ctrl);
    _igvn.replace_node(_fallthroughcatchproj, result_region);
  } else {
    ctrl = top();
  }
  Node *slow_result;
  if (_resproj == NULL) {
    // no uses of the allocation result
    slow_result = top();
  } else {
    slow_result = _resproj->clone();
    transform_later(slow_result);
    _igvn.replace_node(_resproj, result_phi_rawoop);
  }

  // Plug slow-path into result merge point
  result_region    ->init_req( slow_result_path, ctrl );
  result_phi_rawoop->init_req( slow_result_path, slow_result);
  result_phi_rawmem->init_req( slow_result_path, _memproj_fallthrough );
  transform_later(result_region);
  transform_later(result_phi_rawoop);
  transform_later(result_phi_rawmem);
  transform_later(result_phi_i_o);
  // This completes all paths into the result merge point
}


// Helper for PhaseMacroExpand::expand_allocate_common.
// Initializes the newly-allocated storage.
Node*
PhaseMacroExpand::initialize_object(AllocateNode* alloc,
                                    Node* control, Node* rawmem, Node* object,
                                    Node* klass_node, Node* length,
                                    Node* size_in_bytes) {
  InitializeNode* init = alloc->initialization();
  // Store the klass & mark bits
  Node* mark_node = NULL;
  // For now only enable fast locking for non-array types
  if (UseBiasedLocking && (length == NULL)) {
    mark_node = make_load(control, rawmem, klass_node, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), TypeRawPtr::BOTTOM, T_ADDRESS);
  } else {
    mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype()));
  }
  rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);

  rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_OBJECT);
  int header_size = alloc->minimum_header_size();  // conservatively small

  // Array length
  if (length != NULL) {         // Arrays need length field
    rawmem = make_store(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT);
    // conservatively small header size:
    header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE);
    ciKlass* k = _igvn.type(klass_node)->is_klassptr()->klass();
    if (k->is_array_klass())    // we know the exact header size in most cases:
      header_size = Klass::layout_helper_header_size(k->layout_helper());
  }

  // Clear the object body, if necessary.
  if (init == NULL) {
    // The init has somehow disappeared; be cautious and clear everything.
    //
    // This can happen if a node is allocated but an uncommon trap occurs
    // immediately.  In this case, the Initialize gets associated with the
    // trap, and may be placed in a different (outer) loop, if the Allocate
    // is in a loop.  If (this is rare) the inner loop gets unrolled, then
    // there can be two Allocates to one Initialize.  The answer in all these
    // edge cases is safety first.  It is always safe to clear immediately
  // Clear the object body, if necessary.
  if (init == NULL) {
    // The init has somehow disappeared; be cautious and clear everything.
    //
    // This can happen if a node is allocated but an uncommon trap occurs
    // immediately.  In this case, the Initialize gets associated with the
    // trap, and may be placed in a different (outer) loop, if the Allocate
    // is in a loop.  If (this is rare) the inner loop gets unrolled, then
    // there can be two Allocates to one Initialize.  The answer in all these
    // edge cases is safety first.  It is always safe to clear immediately
    // within an Allocate, and then (maybe or maybe not) clear some more later.
    if (!ZeroTLAB)
      rawmem = ClearArrayNode::clear_memory(control, rawmem, object,
                                            header_size, size_in_bytes,
                                            &_igvn);
  } else {
    if (!init->is_complete()) {
      // Try to win by zeroing only what the init does not store.
      // We can also try to do some peephole optimizations,
      // such as combining some adjacent subword stores.
      rawmem = init->complete_stores(control, rawmem, object,
                                     header_size, size_in_bytes, &_igvn);
    }
    // We have no more use for this link, since the AllocateNode goes away:
    init->set_req(InitializeNode::RawAddress, top());
    // (If we keep the link, it just confuses the register allocator,
    // which thinks it sees a real use of the address by the membar.)
  }

  return rawmem;
}
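
// A summary of the three prefetch shapes generated below (a paraphrase of
// the code that follows, not a normative description of the
// AllocatePrefetch* flags):
//
//   AllocatePrefetchStyle == 1: prefetch beyond new_eden_top on the fast
//       path, one PrefetchWrite per predicted cache line, chained on i_o.
//   AllocatePrefetchStyle == 2: keep a per-thread prefetch watermark
//       (tlab_pf_top) and issue prefetches only when the new allocation
//       crosses it, updating the watermark as we go.
//   AllocatePrefetchStyle == 3: prefetch from a cache-line-aligned address
//       derived from old_eden_top, chaining the prefetches through raw
//       memory edges instead of i_o.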
// Generate prefetch instructions for next allocations.
Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false,
                                            Node*& contended_phi_rawmem,
                                            Node* old_eden_top, Node* new_eden_top,
                                            Node* length) {
  enum { fall_in_path = 1, pf_path = 2 };
  if( UseTLAB && AllocatePrefetchStyle == 2 ) {
    // Generate prefetch allocation with watermark check.
    // As an allocation hits the watermark, we will prefetch starting
    // at a "distance" away from watermark.

    Node *pf_region = new (C, 3) RegionNode(3);
    Node *pf_phi_rawmem = new (C, 3) PhiNode( pf_region, Type::MEMORY,
                                              TypeRawPtr::BOTTOM );
    // I/O is used for Prefetch
    Node *pf_phi_abio = new (C, 3) PhiNode( pf_region, Type::ABIO );

    Node *thread = new (C, 1) ThreadLocalNode();
    transform_later(thread);

    Node *eden_pf_adr = new (C, 4) AddPNode( top()/*not oop*/, thread,
                 _igvn.MakeConX(in_bytes(JavaThread::tlab_pf_top_offset())) );
    transform_later(eden_pf_adr);

    Node *old_pf_wm = new (C, 3) LoadPNode( needgc_false,
                                 contended_phi_rawmem, eden_pf_adr,
                                 TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM );
    transform_later(old_pf_wm);

    // check against new_eden_top
    Node *need_pf_cmp = new (C, 3) CmpPNode( new_eden_top, old_pf_wm );
    transform_later(need_pf_cmp);
    Node *need_pf_bol = new (C, 2) BoolNode( need_pf_cmp, BoolTest::ge );
    transform_later(need_pf_bol);
    IfNode *need_pf_iff = new (C, 2) IfNode( needgc_false, need_pf_bol,
                                     PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN );
    transform_later(need_pf_iff);

    // true node, add prefetch distance
    Node *need_pf_true = new (C, 1) IfTrueNode( need_pf_iff );
    transform_later(need_pf_true);

    Node *need_pf_false = new (C, 1) IfFalseNode( need_pf_iff );
    transform_later(need_pf_false);

    Node *new_pf_wmt = new (C, 4) AddPNode( top(), old_pf_wm,
                                  _igvn.MakeConX(AllocatePrefetchDistance) );
    transform_later(new_pf_wmt);
    new_pf_wmt->set_req(0, need_pf_true);

    Node *store_new_wmt = new (C, 4) StorePNode( need_pf_true,
                                     contended_phi_rawmem, eden_pf_adr,
                                     TypeRawPtr::BOTTOM, new_pf_wmt );
    transform_later(store_new_wmt);

    // adding prefetches
    pf_phi_abio->init_req( fall_in_path, i_o );

    Node *prefetch_adr;
    Node *prefetch;
    uint lines = AllocatePrefetchDistance / AllocatePrefetchStepSize;
    uint step_size = AllocatePrefetchStepSize;
    uint distance = 0;

    for ( uint i = 0; i < lines; i++ ) {
      prefetch_adr = new (C, 4) AddPNode( old_pf_wm, new_pf_wmt,
                                          _igvn.MakeConX(distance) );
      transform_later(prefetch_adr);
      prefetch = new (C, 3) PrefetchWriteNode( i_o, prefetch_adr );
      transform_later(prefetch);
      distance += step_size;
      i_o = prefetch;
    }
    pf_phi_abio->set_req( pf_path, i_o );

    pf_region->init_req( fall_in_path, need_pf_false );
    pf_region->init_req( pf_path, need_pf_true );

    pf_phi_rawmem->init_req( fall_in_path, contended_phi_rawmem );
    pf_phi_rawmem->init_req( pf_path, store_new_wmt );

    transform_later(pf_region);
    transform_later(pf_phi_rawmem);
    transform_later(pf_phi_abio);

    needgc_false = pf_region;
    contended_phi_rawmem = pf_phi_rawmem;
    i_o = pf_phi_abio;
  } else if( UseTLAB && AllocatePrefetchStyle == 3 ) {
    // Insert a prefetch for each allocation only on the fast-path
    Node *pf_region = new (C, 3) RegionNode(3);
    Node *pf_phi_rawmem = new (C, 3) PhiNode( pf_region, Type::MEMORY,
                                              TypeRawPtr::BOTTOM );

    // Generate several prefetch instructions only for arrays.
    uint lines = (length != NULL) ? AllocatePrefetchLines : 1;
    uint step_size = AllocatePrefetchStepSize;
    uint distance = AllocatePrefetchDistance;

    // Next cache address.
    Node *cache_adr = new (C, 4) AddPNode(old_eden_top, old_eden_top,
                                          _igvn.MakeConX(distance));
    transform_later(cache_adr);
    cache_adr = new (C, 2) CastP2XNode(needgc_false, cache_adr);
    transform_later(cache_adr);
    Node* mask = _igvn.MakeConX(~(intptr_t)(step_size-1));
    cache_adr = new (C, 3) AndXNode(cache_adr, mask);
    transform_later(cache_adr);
    cache_adr = new (C, 2) CastX2PNode(cache_adr);
    transform_later(cache_adr);

    // Prefetch
    Node *prefetch = new (C, 3) PrefetchWriteNode( contended_phi_rawmem, cache_adr );
    prefetch->set_req(0, needgc_false);
    transform_later(prefetch);
    contended_phi_rawmem = prefetch;
    Node *prefetch_adr;
    distance = step_size;
    for ( uint i = 1; i < lines; i++ ) {
      prefetch_adr = new (C, 4) AddPNode( cache_adr, cache_adr,
                                          _igvn.MakeConX(distance) );
      transform_later(prefetch_adr);
      prefetch = new (C, 3) PrefetchWriteNode( contended_phi_rawmem, prefetch_adr );
      transform_later(prefetch);
      distance += step_size;
      contended_phi_rawmem = prefetch;
    }
  } else if( AllocatePrefetchStyle > 0 ) {
    // Insert a prefetch for each allocation only on the fast-path
    Node *prefetch_adr;
    Node *prefetch;
    // Generate several prefetch instructions only for arrays.
    uint lines = (length != NULL) ? AllocatePrefetchLines : 1;
    uint step_size = AllocatePrefetchStepSize;
    uint distance = AllocatePrefetchDistance;
    for ( uint i = 0; i < lines; i++ ) {
      prefetch_adr = new (C, 4) AddPNode( old_eden_top, new_eden_top,
                                          _igvn.MakeConX(distance) );
      transform_later(prefetch_adr);
      prefetch = new (C, 3) PrefetchWriteNode( i_o, prefetch_adr );
      // Do not let it float too high, since if eden_top == eden_end,
      // both might be null.
      if( i == 0 ) { // Set control for first prefetch, next follows it
        prefetch->init_req(0, needgc_false);
      }
      transform_later(prefetch);
      distance += step_size;
      i_o = prefetch;
    }
  }
  return i_o;
}
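
// For a rough feel of the style > 0 output (illustrative numbers only; the
// real values come from the platform's AllocatePrefetch* flag defaults):
// with AllocatePrefetchDistance == 256, AllocatePrefetchStepSize == 64 and
// AllocatePrefetchLines == 3, an array allocation would emit
//
//   PrefetchWrite [new_eden_top + 256]
//   PrefetchWrite [new_eden_top + 320]
//   PrefetchWrite [new_eden_top + 384]
//
// chained through the i_o edge so the scheduler preserves their order.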


void PhaseMacroExpand::expand_allocate(AllocateNode *alloc) {
  expand_allocate_common(alloc, NULL,
                         OptoRuntime::new_instance_Type(),
                         OptoRuntime::new_instance_Java());
}

void PhaseMacroExpand::expand_allocate_array(AllocateArrayNode *alloc) {
  Node* length = alloc->in(AllocateNode::ALength);
  expand_allocate_common(alloc, length,
                         OptoRuntime::new_array_Type(),
                         OptoRuntime::new_array_Java());
}


// We have determined that this lock/unlock can be eliminated; we simply
// eliminate the node without expanding it.
//
// Note:  The membars associated with the lock/unlock are currently not
//        eliminated.  This should be investigated as a future enhancement.
//
bool PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) {

  if (!alock->is_eliminated()) {
    return false;
  }
  if (alock->is_Lock() && !alock->is_coarsened()) {
    // Create new "eliminated" BoxLock node and use it
    // in monitor debug info for the same object.
    BoxLockNode* oldbox = alock->box_node()->as_BoxLock();
    Node* obj = alock->obj_node();
    if (!oldbox->is_eliminated()) {
      BoxLockNode* newbox = oldbox->clone()->as_BoxLock();
      newbox->set_eliminated();
      transform_later(newbox);
      // Replace old box node with new box for all users
      // of the same object.
      for (uint i = 0; i < oldbox->outcnt();) {

        bool next_edge = true;
        Node* u = oldbox->raw_out(i);
        if (u == alock) {
          i++;
          continue; // It will be removed below
        }
        if (u->is_Lock() &&
            u->as_Lock()->obj_node() == obj &&
            // oldbox could be referenced in debug info also
            u->as_Lock()->box_node() == oldbox) {
          assert(u->as_Lock()->is_eliminated(), "sanity");
          _igvn.hash_delete(u);
          u->set_req(TypeFunc::Parms + 1, newbox);
          next_edge = false;
#ifdef ASSERT
        } else if (u->is_Unlock() && u->as_Unlock()->obj_node() == obj) {
          assert(u->as_Unlock()->is_eliminated(), "sanity");
#endif
        }
        // Replace old box in monitor debug info.
        if (u->is_SafePoint() && u->as_SafePoint()->jvms()) {
          SafePointNode* sfn = u->as_SafePoint();
          JVMState* youngest_jvms = sfn->jvms();
          int max_depth = youngest_jvms->depth();
          for (int depth = 1; depth <= max_depth; depth++) {
            JVMState* jvms = youngest_jvms->of_depth(depth);
            int num_mon = jvms->nof_monitors();
            // Loop over monitors
            for (int idx = 0; idx < num_mon; idx++) {
              Node* obj_node = sfn->monitor_obj(jvms, idx);
              Node* box_node = sfn->monitor_box(jvms, idx);
              if (box_node == oldbox && obj_node == obj) {
                int j = jvms->monitor_box_offset(idx);
                _igvn.hash_delete(u);
                u->set_req(j, newbox);
                next_edge = false;
              }
            } // for (int idx = 0;
          } // for (int depth = 1;
        } // if (u->is_SafePoint()
        if (next_edge) i++;
      } // for (uint i = 0; i < oldbox->outcnt();)
    } // if (!oldbox->is_eliminated())
  } // if (alock->is_Lock() && !alock->is_coarsened())

  CompileLog* log = C->log();
  if (log != NULL) {
    log->head("eliminate_lock lock='%d'",
              alock->is_Lock());
    JVMState* p = alock->jvms();
    while (p != NULL) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("eliminate_lock");
  }

#ifndef PRODUCT
  if (PrintEliminateLocks) {
    if (alock->is_Lock()) {
      tty->print_cr("++++ Eliminating: %d Lock", alock->_idx);
    } else {
      tty->print_cr("++++ Eliminating: %d Unlock", alock->_idx);
    }
  }
#endif

  Node* mem = alock->in(TypeFunc::Memory);
  Node* ctrl = alock->in(TypeFunc::Control);

  extract_call_projections(alock);
  // There are 2 projections from the lock.  The lock node will
  // be deleted when its last use is subsumed below.
  assert(alock->outcnt() == 2 &&
         _fallthroughproj != NULL &&
         _memproj_fallthrough != NULL,
         "Unexpected projections from Lock/Unlock");

  Node* fallthroughproj = _fallthroughproj;
  Node* memproj_fallthrough = _memproj_fallthrough;

  // The memory projection from a lock/unlock is RawMem
  // The input to a Lock is merged memory, so extract its RawMem input
  // (unless the MergeMem has been optimized away.)
  if (alock->is_Lock()) {
    // Search for MemBarAcquire node and delete it also.
    MemBarNode* membar = fallthroughproj->unique_ctrl_out()->as_MemBar();
    assert(membar != NULL && membar->Opcode() == Op_MemBarAcquire, "");
    Node* ctrlproj = membar->proj_out(TypeFunc::Control);
    Node* memproj = membar->proj_out(TypeFunc::Memory);
    _igvn.replace_node(ctrlproj, fallthroughproj);
    _igvn.replace_node(memproj, memproj_fallthrough);

    // Delete FastLock node also if this Lock node is unique user
    // (a loop peeling may clone a Lock node).
    Node* flock = alock->as_Lock()->fastlock_node();
    if (flock->outcnt() == 1) {
      assert(flock->unique_out() == alock, "sanity");
      _igvn.replace_node(flock, top());
    }
  }

  // Search for MemBarRelease node and delete it also.
  if (alock->is_Unlock() && ctrl != NULL && ctrl->is_Proj() &&
      ctrl->in(0)->is_MemBar()) {
    MemBarNode* membar = ctrl->in(0)->as_MemBar();
    assert(membar->Opcode() == Op_MemBarRelease &&
           mem->is_Proj() && membar == mem->in(0), "");
    _igvn.replace_node(fallthroughproj, ctrl);
    _igvn.replace_node(memproj_fallthrough, mem);
    fallthroughproj = ctrl;
    memproj_fallthrough = mem;
    ctrl = membar->in(TypeFunc::Control);
    mem = membar->in(TypeFunc::Memory);
  }

  _igvn.replace_node(fallthroughproj, ctrl);
  _igvn.replace_node(memproj_fallthrough, mem);
  return true;
}


//------------------------------expand_lock_node----------------------
void PhaseMacroExpand::expand_lock_node(LockNode *lock) {

  Node* ctrl = lock->in(TypeFunc::Control);
  Node* mem = lock->in(TypeFunc::Memory);
  Node* obj = lock->obj_node();
  Node* box = lock->box_node();
  Node* flock = lock->fastlock_node();

  // Make the merge point
  Node *region;
  Node *mem_phi;
  Node *slow_path;

  if (UseOptoBiasInlining) {
    /*
     * See the full description in MacroAssembler::biased_locking_enter().
     *
     * if( (mark_word & biased_lock_mask) == biased_lock_pattern ) {
     *   // The object is biased.
     *   proto_node = klass->prototype_header;
     *   o_node = thread | proto_node;
     *   x_node = o_node ^ mark_word;
     *   if( (x_node & ~age_mask) == 0 ) { // Biased to the current thread ?
     *     // Done.
     *   } else {
     *     if( (x_node & biased_lock_mask) != 0 ) {
     *       // The klass's prototype header is no longer biased.
     *       cas(&mark_word, mark_word, proto_node)
     *       goto cas_lock;
     *     } else {
     *       // The klass's prototype header is still biased.
     *       if( (x_node & epoch_mask) != 0 ) { // Expired epoch?
     *         old = mark_word;
     *         new = o_node;
     *       } else {
     *         // Different thread or anonymous biased.
     *         old = mark_word & (epoch_mask | age_mask | biased_lock_mask);
     *         new = thread | old;
     *       }
     *       // Try to rebias.
     *       if( cas(&mark_word, old, new) == 0 ) {
     *         // Done.
     *       } else {
     *         goto slow_path; // Failed.
     *       }
     *     }
     *   }
     * } else {
     *   // The object is not biased.
     *   cas_lock:
     *   if( FastLock(obj) == 0 ) {
     *     // Done.
     *   } else {
     *     slow_path:
     *     OptoRuntime::complete_monitor_locking_Java(obj);
     *   }
     * }
     */

    region  = new (C, 5) RegionNode(5);
    // create a Phi for the memory state
    mem_phi = new (C, 5) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);

    Node* fast_lock_region  = new (C, 3) RegionNode(3);
    Node* fast_lock_mem_phi = new (C, 3) PhiNode( fast_lock_region, Type::MEMORY, TypeRawPtr::BOTTOM);

    // First, check mark word for the biased lock pattern.
    Node* mark_node = make_load(ctrl, mem, obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type());

    // Get fast path - mark word has the biased lock pattern.
    ctrl = opt_bits_test(ctrl, fast_lock_region, 1, mark_node,
                         markOopDesc::biased_lock_mask_in_place,
                         markOopDesc::biased_lock_pattern, true);
    // fast_lock_region->in(1) is set to slow path.
    fast_lock_mem_phi->init_req(1, mem);

    // Now check that the lock is biased to the current thread and has
    // the same epoch and bias as Klass::_prototype_header.
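    // Roughly: x_node = (thread | prototype_header) ^ mark_word is zero
    // outside the age bits exactly when the mark word is already biased to
    // the current thread with the current epoch; see the pseudocode above
    // and MacroAssembler::biased_locking_enter() for the full story.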

    // Special-case a fresh allocation to avoid building nodes:
    Node* klass_node = AllocateNode::Ideal_klass(obj, &_igvn);
    if (klass_node == NULL) {
      Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
      klass_node = transform_later( LoadKlassNode::make(_igvn, mem, k_adr, _igvn.type(k_adr)->is_ptr()) );
#ifdef _LP64
      if (UseCompressedOops && klass_node->is_DecodeN()) {
        assert(klass_node->in(1)->Opcode() == Op_LoadNKlass, "sanity");
        klass_node->in(1)->init_req(0, ctrl);
      } else
#endif
      klass_node->init_req(0, ctrl);
    }
    Node *proto_node = make_load(ctrl, mem, klass_node, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), TypeX_X, TypeX_X->basic_type());

    Node* thread = transform_later(new (C, 1) ThreadLocalNode());
    Node* cast_thread = transform_later(new (C, 2) CastP2XNode(ctrl, thread));
    Node* o_node = transform_later(new (C, 3) OrXNode(cast_thread, proto_node));
    Node* x_node = transform_later(new (C, 3) XorXNode(o_node, mark_node));

    // Get slow path - mark word does NOT match the value.
    Node* not_biased_ctrl = opt_bits_test(ctrl, region, 3, x_node,
                                          (~markOopDesc::age_mask_in_place), 0);
    // region->in(3) is set to fast path - the object is biased to the current thread.
    mem_phi->init_req(3, mem);


    // Mark word does NOT match the value (thread | Klass::_prototype_header).


    // First, check biased pattern.
    // Get fast path - _prototype_header has the same biased lock pattern.
    ctrl = opt_bits_test(not_biased_ctrl, fast_lock_region, 2, x_node,
                         markOopDesc::biased_lock_mask_in_place, 0, true);

    not_biased_ctrl = fast_lock_region->in(2); // Slow path
    // fast_lock_region->in(2) - the prototype header is no longer biased
    // and we have to revoke the bias on this object.
    // We are going to try to reset the mark of this object to the prototype
    // value and fall through to the CAS-based locking scheme.
    Node* adr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
    Node* cas = new (C, 5) StoreXConditionalNode(not_biased_ctrl, mem, adr,
                                                 proto_node, mark_node);
    transform_later(cas);
    Node* proj = transform_later( new (C, 1) SCMemProjNode(cas));
    fast_lock_mem_phi->init_req(2, proj);


    // Second, check epoch bits.
    Node* rebiased_region  = new (C, 3) RegionNode(3);
    Node* old_phi = new (C, 3) PhiNode( rebiased_region, TypeX_X);
    Node* new_phi = new (C, 3) PhiNode( rebiased_region, TypeX_X);

    // Get slow path - mark word does NOT match epoch bits.
    Node* epoch_ctrl = opt_bits_test(ctrl, rebiased_region, 1, x_node,
                                     markOopDesc::epoch_mask_in_place, 0);
    // The epoch of the current bias is not valid, attempt to rebias the object
    // toward the current thread.
    rebiased_region->init_req(2, epoch_ctrl);
    old_phi->init_req(2, mark_node);
    new_phi->init_req(2, o_node);

    // rebiased_region->in(1) is set to fast path.
    // The epoch of the current bias is still valid but we know
    // nothing about the owner; it might be set or it might be clear.
    Node* cmask = MakeConX(markOopDesc::biased_lock_mask_in_place |
                           markOopDesc::age_mask_in_place |
                           markOopDesc::epoch_mask_in_place);
    Node* old = transform_later(new (C, 3) AndXNode(mark_node, cmask));
    cast_thread = transform_later(new (C, 2) CastP2XNode(ctrl, thread));
    Node* new_mark = transform_later(new (C, 3) OrXNode(cast_thread, old));
    old_phi->init_req(1, old);
    new_phi->init_req(1, new_mark);

    transform_later(rebiased_region);
    transform_later(old_phi);
    transform_later(new_phi);

    // Try to acquire the bias of the object using an atomic operation.
    // If this fails we will go into the runtime to revoke the object's bias.
    cas = new (C, 5) StoreXConditionalNode(rebiased_region, mem, adr,
                                           new_phi, old_phi);
    transform_later(cas);
    proj = transform_later( new (C, 1) SCMemProjNode(cas));

    // Get slow path - Failed to CAS.
    not_biased_ctrl = opt_bits_test(rebiased_region, region, 4, cas, 0, 0);
    mem_phi->init_req(4, proj);
    // region->in(4) is set to fast path - the object is rebiased to the current thread.

    // Failed to CAS.
    slow_path  = new (C, 3) RegionNode(3);
    Node *slow_mem = new (C, 3) PhiNode( slow_path, Type::MEMORY, TypeRawPtr::BOTTOM);

    slow_path->init_req(1, not_biased_ctrl); // Capture slow-control
    slow_mem->init_req(1, proj);

    // Call CAS-based locking scheme (FastLock node).

    transform_later(fast_lock_region);
    transform_later(fast_lock_mem_phi);

    // Get slow path - FastLock failed to lock the object.
    ctrl = opt_bits_test(fast_lock_region, region, 2, flock, 0, 0);
    mem_phi->init_req(2, fast_lock_mem_phi);
    // region->in(2) is set to fast path - the object is locked to the current thread.

    slow_path->init_req(2, ctrl); // Capture slow-control
    slow_mem->init_req(2, fast_lock_mem_phi);

    transform_later(slow_path);
    transform_later(slow_mem);
    // Reset lock's memory edge.
    lock->set_req(TypeFunc::Memory, slow_mem);

  } else {
    region  = new (C, 3) RegionNode(3);
    // create a Phi for the memory state
    mem_phi = new (C, 3) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);

    // Optimize test; set region slot 2
    slow_path = opt_bits_test(ctrl, region, 2, flock, 0, 0);
    mem_phi->init_req(2, mem);
  }
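
  // At this point the merge slots of region/mem_phi are, roughly:
  // with UseOptoBiasInlining,
  //   in(1): slow-path runtime call (filled in below)
  //   in(2): FastLock succeeded
  //   in(3): already biased to the current thread
  //   in(4): rebias CAS succeeded
  // and without it only in(1) (slow) and in(2) (fast) exist.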

  // Make slow path call
  CallNode *call = make_slow_call( (CallNode *) lock, OptoRuntime::complete_monitor_enter_Type(), OptoRuntime::complete_monitor_locking_Java(), NULL, slow_path, obj, box );

  extract_call_projections(call);

  // Slow path can only throw asynchronous exceptions, which are always
  // de-opted.  So the compiler thinks the slow-call can never throw an
  // exception.  If it DOES throw an exception we would need the debug
  // info removed first (since if it throws there is no monitor).
  assert ( _ioproj_fallthrough == NULL && _ioproj_catchall == NULL &&
           _memproj_catchall == NULL && _catchallcatchproj == NULL, "Unexpected projection from Lock");

  // Capture slow path
  // disconnect fall-through projection from call and create a new one
  // hook up users of fall-through projection to region
  Node *slow_ctrl = _fallthroughproj->clone();
  transform_later(slow_ctrl);
  _igvn.hash_delete(_fallthroughproj);
  _fallthroughproj->disconnect_inputs(NULL);
  region->init_req(1, slow_ctrl);
  // region inputs are now complete
  transform_later(region);
  _igvn.replace_node(_fallthroughproj, region);

  Node *memproj = transform_later( new (C, 1) ProjNode(call, TypeFunc::Memory) );
  mem_phi->init_req(1, memproj);
  transform_later(mem_phi);
  _igvn.replace_node(_memproj_fallthrough, mem_phi);
}

//------------------------------expand_unlock_node----------------------
void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) {

  Node* ctrl = unlock->in(TypeFunc::Control);
  Node* mem = unlock->in(TypeFunc::Memory);
  Node* obj = unlock->obj_node();
  Node* box = unlock->box_node();

  // No need for a null check on unlock

  // Make the merge point
  Node *region;
  Node *mem_phi;

  if (UseOptoBiasInlining) {
    // Check for biased locking unlock case, which is a no-op.
    // See the full description in MacroAssembler::biased_locking_exit().
    region  = new (C, 4) RegionNode(4);
    // create a Phi for the memory state
    mem_phi = new (C, 4) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
    mem_phi->init_req(3, mem);

    Node* mark_node = make_load(ctrl, mem, obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type());
    ctrl = opt_bits_test(ctrl, region, 3, mark_node,
                         markOopDesc::biased_lock_mask_in_place,
                         markOopDesc::biased_lock_pattern);
  } else {
    region  = new (C, 3) RegionNode(3);
    // create a Phi for the memory state
    mem_phi = new (C, 3) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
  }

  FastUnlockNode *funlock = new (C, 3) FastUnlockNode( ctrl, obj, box );
  funlock = transform_later( funlock )->as_FastUnlock();
  // Optimize test; set region slot 2
  Node *slow_path = opt_bits_test(ctrl, region, 2, funlock, 0, 0);

  CallNode *call = make_slow_call( (CallNode *) unlock, OptoRuntime::complete_monitor_exit_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), "complete_monitor_unlocking_C", slow_path, obj, box );

  extract_call_projections(call);

  assert ( _ioproj_fallthrough == NULL && _ioproj_catchall == NULL &&
           _memproj_catchall == NULL && _catchallcatchproj == NULL, "Unexpected projection from Unlock");

  // No exceptions for unlocking
  // Capture slow path
  // disconnect fall-through projection from call and create a new one
  // hook up users of fall-through projection to region
  Node *slow_ctrl = _fallthroughproj->clone();
  transform_later(slow_ctrl);
  _igvn.hash_delete(_fallthroughproj);
  _fallthroughproj->disconnect_inputs(NULL);
  region->init_req(1, slow_ctrl);
  // region inputs are now complete
  transform_later(region);
  _igvn.replace_node(_fallthroughproj, region);

  Node *memproj = transform_later( new (C, 1) ProjNode(call, TypeFunc::Memory) );
  mem_phi->init_req(1, memproj);
  mem_phi->init_req(2, mem);
  transform_later(mem_phi);
  _igvn.replace_node(_memproj_fallthrough, mem_phi);
}
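
// Macro expansion proceeds in (roughly) three passes over the macro list:
// first eliminate locks/unlocks that were proved redundant (along with
// Opaque1/Opaque2 marker nodes), then eliminate non-escaping allocations,
// and finally expand whatever macro nodes remain into their full fast/slow
// path subgraphs. Elimination runs to a fixpoint because removing one node
// can enable removing another.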

//------------------------------expand_macro_nodes----------------------
//  Returns true if a failure occurred.
bool PhaseMacroExpand::expand_macro_nodes() {
  if (C->macro_count() == 0)
    return false;
  // First, attempt to eliminate locks
  bool progress = true;
  while (progress) {
    progress = false;
    for (int i = C->macro_count(); i > 0; i--) {
      Node * n = C->macro_node(i-1);
      bool success = false;
      debug_only(int old_macro_count = C->macro_count(););
      if (n->is_AbstractLock()) {
        success = eliminate_locking_node(n->as_AbstractLock());
      } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
        _igvn.replace_node(n, n->in(1));
        success = true;
      }
      assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
      progress = progress || success;
    }
  }
  // Next, attempt to eliminate allocations
  progress = true;
  while (progress) {
    progress = false;
    for (int i = C->macro_count(); i > 0; i--) {
      Node * n = C->macro_node(i-1);
      bool success = false;
      debug_only(int old_macro_count = C->macro_count(););
      switch (n->class_id()) {
      case Node::Class_Allocate:
      case Node::Class_AllocateArray:
        success = eliminate_allocate_node(n->as_Allocate());
        break;
      case Node::Class_Lock:
      case Node::Class_Unlock:
        assert(!n->as_AbstractLock()->is_eliminated(), "sanity");
        break;
      default:
        assert(false, "unknown node type in macro list");
      }
      assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
      progress = progress || success;
    }
  }
  // Make sure expansion will not cause node limit to be exceeded.
  // Worst case is a macro node gets expanded into about 50 nodes.
  // Allow 50% more for optimization (50 * 1.5 = 75 nodes per macro node).
  if (C->check_node_count(C->macro_count() * 75, "out of nodes before macro expansion"))
    return true;

  // expand "macro" nodes
  // nodes are removed from the macro list as they are processed
  while (C->macro_count() > 0) {
    int macro_count = C->macro_count();
    Node * n = C->macro_node(macro_count-1);
    assert(n->is_macro(), "only macro nodes expected here");
    if (_igvn.type(n) == Type::TOP || n->in(0)->is_top()) {
      // node is unreachable, so don't try to expand it
      C->remove_macro_node(n);
      continue;
    }
    switch (n->class_id()) {
    case Node::Class_Allocate:
      expand_allocate(n->as_Allocate());
      break;
    case Node::Class_AllocateArray:
      expand_allocate_array(n->as_AllocateArray());
      break;
    case Node::Class_Lock:
      expand_lock_node(n->as_Lock());
      break;
    case Node::Class_Unlock:
      expand_unlock_node(n->as_Unlock());
      break;
    default:
      assert(false, "unknown node type in macro list");
    }
    assert(C->macro_count() < macro_count, "must have deleted a node from macro list");
    if (C->failing())  return true;
  }

  _igvn.set_delay_transform(false);
  _igvn.optimize();
  return false;
}