        _ioproj_fallthrough = pn;
        break;
      case TypeFunc::Memory:
        if (pn->_is_io_use)
          _memproj_catchall = pn;
        else
          _memproj_fallthrough = pn;
        break;
      case TypeFunc::Parms:
        _resproj = pn;
        break;
      default:
        assert(false, "unexpected projection from allocation node.");
    }
  }

}

// Eliminate a card mark sequence.  p2x is a ConvP2XNode
void PhaseMacroExpand::eliminate_card_mark(Node* p2x) {
  assert(p2x->Opcode() == Op_CastP2X, "ConvP2XNode required");
  if (!UseG1GC) {
    // vanilla/CMS post barrier
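    // The barrier here has the shape CastP2X -> URShiftX (card index) ->
    // AddP (card table address) -> StoreB, so replacing each store with its
    // memory input (and any conditional card mark load with zero) removes
    // the entire card mark sequence.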
    Node *shift = p2x->unique_out();
    Node *addp = shift->unique_out();
    for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
      Node *mem = addp->last_out(j);
      if (UseCondCardMark && mem->is_Load()) {
        assert(mem->Opcode() == Op_LoadB, "unexpected code shape");
        // The load is checking if the card has been written so
        // replace it with zero to fold the test.
        _igvn.replace_node(mem, intcon(0));
        continue;
      }
      assert(mem->is_Store(), "store required");
      _igvn.replace_node(mem, mem->in(MemNode::Memory));
    }
  } else {
    // G1 pre/post barriers
    assert(p2x->outcnt() <= 2, "expects 1 or 2 users: Xor and URShift nodes");
    // There may be only one user, the URShift node, in the Object.clone()
    // intrinsic, but there the new allocation is passed to the arraycopy
    // stub and cannot be scalar replaced, so we don't check that case.

    // Another case of only one user (the Xor) is when the NULL check of the
    // value in the G1 post barrier is folded after CCP, so the code that
    // used the URShift is removed.

    // Take the Region node before eliminating the post barrier since that
    // also eliminates the CastP2X node when it has only one user.
    Node* this_region = p2x->in(0);
    assert(this_region != NULL, "");

    // Remove G1 post barrier.

    // Search for the CastP2X->Xor->URShift->Cmp path which checks whether
    // the store was done to a region different from the value's region,
    // and replace the Cmp with #0 (false) to collapse the G1 post barrier.
    Node* xorx = p2x->find_out_with(Op_XorX);
    if (xorx != NULL) {
      Node* shift = xorx->unique_out();
      Node* cmpx = shift->unique_out();
      assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
             cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
             "missing region check in G1 post barrier");
      _igvn.replace_node(cmpx, makecon(TypeInt::CC_EQ));

      // Remove G1 pre barrier.

      // Search for the "if (marking != 0)" check and set it to "false".
      // There is no G1 pre barrier if the previously stored value is NULL
      // (for example, right after initialization).
      if (this_region->is_Region() && this_region->req() == 3) {
        int ind = 1;
        if (!this_region->in(ind)->is_IfFalse()) {
          ind = 2;
        }
        if (this_region->in(ind)->is_IfFalse()) {
          Node* bol = this_region->in(ind)->in(0)->in(1);
          assert(bol->is_Bool(), "");
          cmpx = bol->in(1);
          if (bol->as_Bool()->_test._test == BoolTest::ne &&
              cmpx->is_Cmp() && cmpx->in(2) == intcon(0) &&
              cmpx->in(1)->is_Load()) {
            Node* adr = cmpx->in(1)->as_Load()->in(MemNode::Address);
            const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() +
                                                SATBMarkQueue::byte_offset_of_active());
            if (adr->is_AddP() && adr->in(AddPNode::Base) == top() &&
                adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
                adr->in(AddPNode::Offset) == MakeConX(marking_offset)) {
              _igvn.replace_node(cmpx, makecon(TypeInt::CC_EQ));
            }
          }
        }
      }
    } else {
      assert(!GraphKit::use_ReduceInitialCardMarks(), "can only happen with card marking");
      // This is a G1 post barrier emitted by the Object.clone() intrinsic.
      // Search for the CastP2X->URShiftX->AddP->LoadB->Cmp path which checks
      // whether the card is marked as young_gen, and replace the Cmp with 0
      // (false) to collapse the barrier.
      Node* shift = p2x->find_out_with(Op_URShiftX);
      assert(shift != NULL, "missing G1 post barrier");
      Node* addp = shift->unique_out();
      Node* load = addp->find_out_with(Op_LoadB);
      assert(load != NULL, "missing G1 post barrier");
      Node* cmpx = load->unique_out();
      assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
             cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
             "missing card value check in G1 post barrier");
      _igvn.replace_node(cmpx, makecon(TypeInt::CC_EQ));
      // There is no G1 pre barrier in this case.
    }
    // Now the CastP2X can be removed since it is used only on a dead path
    // which is currently still alive until IGVN optimizes it.
    assert(p2x->outcnt() == 0 || p2x->unique_out()->Opcode() == Op_URShiftX, "");
    _igvn.replace_node(p2x, top());
  }
}

// Search for a memory operation for the specified memory slice.
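// The loop below walks the memory graph backwards from 'mem' toward the
// allocation: MergeMems are narrowed to the slice of interest; safepoints,
// calls, locks and membars are skipped (the object is already known to be
// eliminable); and the walk stops at a sentinel, a store, or a node that
// defeats scalar replacement.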
static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_mem, Node *alloc, PhaseGVN *phase) {
  Node *orig_mem = mem;
  Node *alloc_mem = alloc->in(TypeFunc::Memory);
  const TypeOopPtr *tinst = phase->C->get_adr_type(alias_idx)->isa_oopptr();
  while (true) {
    if (mem == alloc_mem || mem == start_mem) {
      return mem;  // hit one of our sentinels
    } else if (mem->is_MergeMem()) {
      mem = mem->as_MergeMem()->memory_at(alias_idx);
    } else if (mem->is_Proj() && mem->as_Proj()->_con == TypeFunc::Memory) {
      Node *in = mem->in(0);
      // we can safely skip over safepoints, calls, locks and membars because we
      // already know that the object is safe to eliminate.
      if (in->is_Initialize() && in->as_Initialize()->allocation() == alloc) {
        return in;
      // ...
        return mem;
      } else {
        assert(adr_idx == Compile::AliasIdxRaw, "address must match or be raw");
      }
      mem = mem->in(MemNode::Memory);
    } else if (mem->is_ClearArray()) {
      if (!ClearArrayNode::step_through(&mem, alloc->_idx, phase)) {
        // Can not bypass initialization of the instance
        // we are looking at.
        debug_only(intptr_t offset;)
        assert(alloc == AllocateNode::Ideal_allocation(mem->in(3), phase, offset), "sanity");
        InitializeNode* init = alloc->as_Allocate()->initialization();
        // We are looking for the stored value, so return the Initialize node
        // or the memory edge from the Allocate node.
        if (init != NULL)
          return init;
        else
          return alloc->in(TypeFunc::Memory); // It will produce zero value (see callers).
      }
      // Otherwise skip it (the call updated 'mem' value).
    } else if (mem->Opcode() == Op_SCMemProj) {
      mem = mem->in(0);
      Node* adr = NULL;
      if (mem->is_LoadStore()) {
        adr = mem->in(MemNode::Address);
      } else {
        assert(mem->Opcode() == Op_EncodeISOArray ||
               mem->Opcode() == Op_StrCompressedCopy, "sanity");
        adr = mem->in(3); // Destination array
      }
      const TypePtr* atype = adr->bottom_type()->is_ptr();
      int adr_idx = phase->C->get_alias_index(atype);
      if (adr_idx == alias_idx) {
        DEBUG_ONLY(mem->dump();)
        assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
        return NULL;
      }
      mem = mem->in(MemNode::Memory);
    } else if (mem->Opcode() == Op_StrInflatedCopy) {
      Node* adr = mem->in(3); // Destination array
      const TypePtr* atype = adr->bottom_type()->is_ptr();
      int adr_idx = phase->C->get_alias_index(atype);
      if (adr_idx == alias_idx) {
        DEBUG_ONLY(mem->dump();)
        assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
        return NULL;
      }
      mem = mem->in(MemNode::Memory);
    } else {
      return mem;
    }
    assert(mem != orig_mem, "dead memory loop");
  }
}

// Generate loads from the source of the arraycopy for fields of the
// destination needed at a deoptimization point.
Node* PhaseMacroExpand::make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset, Node* ctl, BasicType ft, const Type *ftype, AllocateNode *alloc) {
  BasicType bt = ft;
  // ...
        continue;
      }
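      // Resolve the memory node found on this Phi input to the concrete
      // value stored at 'offset': a captured store yields its stored value,
      // a projection of the allocation yields zero, a nested Phi recurses
      // one level deeper, and anything else defeats the scalar replacement.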
      if (val->is_Initialize()) {
        val = val->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
      }
      if (val == NULL) {
        return NULL; // can't find a value on this path
      }
      if (val == mem) {
        values.at_put(j, mem);
      } else if (val->is_Store()) {
        values.at_put(j, val->in(MemNode::ValueIn));
      } else if (val->is_Proj() && val->in(0) == alloc) {
        values.at_put(j, _igvn.zerocon(ft));
      } else if (val->is_Phi()) {
        val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, value_phis, level-1);
        if (val == NULL) {
          return NULL;
        }
        values.at_put(j, val);
      } else if (val->Opcode() == Op_SCMemProj) {
        assert(val->in(0)->is_LoadStore() ||
               val->in(0)->Opcode() == Op_EncodeISOArray ||
               val->in(0)->Opcode() == Op_StrCompressedCopy, "sanity");
        assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
        return NULL;
      } else if (val->is_ArrayCopy()) {
        Node* res = make_arraycopy_load(val->as_ArrayCopy(), offset, val->in(0), ft, phi_type, alloc);
        if (res == NULL) {
          return NULL;
        }
        values.at_put(j, res);
      } else {
#ifdef ASSERT
        val->dump();
        assert(false, "unknown node on this path");
#endif
        return NULL; // unknown node on this path
      }
    }
  }
  // Set Phi's inputs
  for (uint j = 1; j < length; j++) {
    if (values.at(j) == mem) {
      // ...
    }
  }

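  // Scan all uses of the allocation result.  Scalar replacement remains
  // possible only if every use is a store to the object's fields (through
  // AddP), a card mark (CastP2X), a validated arraycopy that writes into
  // this object, or a debug use at a safepoint; any other use disqualifies
  // the allocation.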
  if (can_eliminate && res != NULL) {
    for (DUIterator_Fast jmax, j = res->fast_outs(jmax);
                               j < jmax && can_eliminate; j++) {
      Node* use = res->fast_out(j);

      if (use->is_AddP()) {
        const TypePtr* addp_type = _igvn.type(use)->is_ptr();
        int offset = addp_type->offset();

        if (offset == Type::OffsetTop || offset == Type::OffsetBot) {
          NOT_PRODUCT(fail_eliminate = "Undefined field reference";)
          can_eliminate = false;
          break;
        }
        for (DUIterator_Fast kmax, k = use->fast_outs(kmax);
                                   k < kmax && can_eliminate; k++) {
          Node* n = use->fast_out(k);
          if (!n->is_Store() && n->Opcode() != Op_CastP2X &&
              !(n->is_ArrayCopy() &&
                n->as_ArrayCopy()->is_clonebasic() &&
                n->in(ArrayCopyNode::Dest) == use)) {
            DEBUG_ONLY(disq_node = n;)
            if (n->is_Load() || n->is_LoadStore()) {
              NOT_PRODUCT(fail_eliminate = "Field load";)
            } else {
              NOT_PRODUCT(fail_eliminate = "Not store field reference";)
            }
            can_eliminate = false;
          }
        }
      } else if (use->is_ArrayCopy() &&
                 (use->as_ArrayCopy()->is_arraycopy_validated() ||
                  use->as_ArrayCopy()->is_copyof_validated() ||
                  use->as_ArrayCopy()->is_copyofrange_validated()) &&
                 use->in(ArrayCopyNode::Dest) == res) {
        // ok to eliminate
      } else if (use->is_SafePoint()) {
        SafePointNode* sfpt = use->as_SafePoint();
        if (sfpt->is_Call() && sfpt->as_Call()->has_non_debug_use(res)) {
          // Object is passed as argument.
          DEBUG_ONLY(disq_node = use;)
          NOT_PRODUCT(fail_eliminate = "Object is passed as argument";)
          can_eliminate = false;
        }
        Node* sfptMem = sfpt->memory();
        if (sfptMem == NULL || sfptMem->is_top()) {
          DEBUG_ONLY(disq_node = use;)
          NOT_PRODUCT(fail_eliminate = "NULL or TOP memory";)
          can_eliminate = false;
        } else {
          safepoints.append_if_missing(sfpt);
        }
      } else if (use->Opcode() != Op_CastP2X) { // CastP2X is used by card mark
        if (use->is_Phi()) {
          if (use->outcnt() == 1 && use->unique_out()->Opcode() == Op_Return) {
            NOT_PRODUCT(fail_eliminate = "Object is return value";)
          } else {
            NOT_PRODUCT(fail_eliminate = "Object is referenced by Phi";)
          }
          DEBUG_ONLY(disq_node = use;)
        } else {
          if (use->Opcode() == Op_Return) {
            NOT_PRODUCT(fail_eliminate = "Object is return value";)
          } else {
            NOT_PRODUCT(fail_eliminate = "Object is referenced by node";)
          }
          DEBUG_ONLY(disq_node = use;)
        }
        can_eliminate = false;
      }
    }
  }

#ifndef PRODUCT
  if (PrintEliminateAllocations) {
    if (can_eliminate) {
      tty->print("Scalar ");
      if (res == NULL)
        alloc->dump();
      else
        res->dump();
    } else if (alloc->_is_scalar_replaceable) {
      // ...
      // MemBarStoreStore was already added.  If the object does not escape,
      // no MemBarStoreStore is needed.  If the object does not escape in
      // its initializer and a memory barrier (MemBarStoreStore or stronger)
      // is already added at the exit of the initializer, there is also no
      // need for a MemBarStoreStore.  Otherwise we need a MemBarStoreStore
      // so that stores that initialize this object can't be reordered with
      // a subsequent store that makes this object accessible to other
      // threads.
      // Other threads include Java threads and JVM internal threads (for
      // example concurrent GC threads).  The current concurrent GC
      // implementations, CMS and G1, will not scan a newly created object,
      // so it's safe to skip the storestore barrier when the allocation
      // does not escape.
      if (!alloc->does_not_escape_thread() &&
          !alloc->is_allocation_MemBar_redundant() &&
          (init == NULL || !init->is_complete_with_arraycopy())) {
        if (init == NULL || init->req() < InitializeNode::RawStores) {
          // No InitializeNode or no stores captured by zeroing
          // elimination. Simply add the MemBarStoreStore after object
          // initialization.
          MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot);
          transform_later(mb);

          mb->init_req(TypeFunc::Memory, fast_oop_rawmem);
          mb->init_req(TypeFunc::Control, fast_oop_ctrl);
          fast_oop_ctrl = new ProjNode(mb, TypeFunc::Control);
          transform_later(fast_oop_ctrl);
          fast_oop_rawmem = new ProjNode(mb, TypeFunc::Memory);
          transform_later(fast_oop_rawmem);
        } else {
          // Add the MemBarStoreStore after the InitializeNode so that
          // all stores performing the initialization that were moved
          // before the InitializeNode happen before the storestore
          // barrier.

          Node* init_ctrl = init->proj_out(TypeFunc::Control);
          Node* init_mem = init->proj_out(TypeFunc::Memory);

          MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot);
          transform_later(mb);

          Node* ctrl = new ProjNode(init, TypeFunc::Control);
          transform_later(ctrl);
          Node* mem = new ProjNode(init, TypeFunc::Memory);
          transform_later(mem);

          // The MemBarStoreStore depends on control and memory coming
          // from the InitializeNode
          mb->init_req(TypeFunc::Memory, mem);
          mb->init_req(TypeFunc::Control, ctrl);

          ctrl = new ProjNode(mb, TypeFunc::Control);
          transform_later(ctrl);
          mem = new ProjNode(mb, TypeFunc::Memory);
          transform_later(mem);

          // All nodes that depended on the InitializeNode for control
          // and memory must now depend on the MemBarNode that itself
          // depends on the InitializeNode
          // ...
  Node* mem = alock->in(TypeFunc::Memory);
  Node* ctrl = alock->in(TypeFunc::Control);

  extract_call_projections(alock);
  // There are 2 projections from the lock.  The lock node will
  // be deleted when its last use is subsumed below.
  assert(alock->outcnt() == 2 &&
         _fallthroughproj != NULL &&
         _memproj_fallthrough != NULL,
         "Unexpected projections from Lock/Unlock");

  Node* fallthroughproj = _fallthroughproj;
  Node* memproj_fallthrough = _memproj_fallthrough;

  // The memory projection from a lock/unlock is RawMem.
  // The input to a Lock is merged memory, so extract its RawMem input
  // (unless the MergeMem has been optimized away).
  if (alock->is_Lock()) {
    // Search for the MemBarAcquireLock node and delete it also.
    MemBarNode* membar = fallthroughproj->unique_ctrl_out()->as_MemBar();
    assert(membar != NULL && membar->Opcode() == Op_MemBarAcquireLock, "");
    Node* ctrlproj = membar->proj_out(TypeFunc::Control);
    Node* memproj = membar->proj_out(TypeFunc::Memory);
    _igvn.replace_node(ctrlproj, fallthroughproj);
    _igvn.replace_node(memproj, memproj_fallthrough);

    // Delete the FastLock node also if this Lock node is its unique user
    // (a loop peeling may clone a Lock node).
    Node* flock = alock->as_Lock()->fastlock_node();
    if (flock->outcnt() == 1) {
      assert(flock->unique_out() == alock, "sanity");
      _igvn.replace_node(flock, top());
    }
  }

  // Search for the MemBarReleaseLock node and delete it also.
  if (alock->is_Unlock() && ctrl != NULL && ctrl->is_Proj() &&
      ctrl->in(0)->is_MemBar()) {
    MemBarNode* membar = ctrl->in(0)->as_MemBar();
    assert(membar->Opcode() == Op_MemBarReleaseLock &&
           mem->is_Proj() && membar == mem->in(0), "");
    _igvn.replace_node(fallthroughproj, ctrl);
    _igvn.replace_node(memproj_fallthrough, mem);
    fallthroughproj = ctrl;
    memproj_fallthrough = mem;
    ctrl = membar->in(TypeFunc::Control);
    mem = membar->in(TypeFunc::Memory);
  }

  _igvn.replace_node(fallthroughproj, ctrl);
  _igvn.replace_node(memproj_fallthrough, mem);
  return true;
}


//------------------------------expand_lock_node----------------------
void PhaseMacroExpand::expand_lock_node(LockNode *lock) {

  Node* ctrl = lock->in(TypeFunc::Control);
  Node* mem = lock->in(TypeFunc::Memory);
  // ...
  // First, check mark word for the biased lock pattern.
  Node* mark_node = make_load(ctrl, mem, obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type());

  // Get fast path - mark word has the biased lock pattern.
  ctrl = opt_bits_test(ctrl, fast_lock_region, 1, mark_node,
                       markOopDesc::biased_lock_mask_in_place,
                       markOopDesc::biased_lock_pattern, true);
  // fast_lock_region->in(1) is set to slow path.
  fast_lock_mem_phi->init_req(1, mem);

  // Now check that the lock is biased to the current thread and has
  // the same epoch and bias as Klass::_prototype_header.

  // Special-case a fresh allocation to avoid building nodes:
  Node* klass_node = AllocateNode::Ideal_klass(obj, &_igvn);
  if (klass_node == NULL) {
    Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
    klass_node = transform_later(LoadKlassNode::make(_igvn, NULL, mem, k_adr, _igvn.type(k_adr)->is_ptr()));
#ifdef _LP64
    if (UseCompressedClassPointers && klass_node->is_DecodeNKlass()) {
      assert(klass_node->in(1)->Opcode() == Op_LoadNKlass, "sanity");
      klass_node->in(1)->init_req(0, ctrl);
    } else
#endif
    klass_node->init_req(0, ctrl);
  }
  Node *proto_node = make_load(ctrl, mem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeX_X, TypeX_X->basic_type());

  Node* thread = transform_later(new ThreadLocalNode());
  Node* cast_thread = transform_later(new CastP2XNode(ctrl, thread));
  Node* o_node = transform_later(new OrXNode(cast_thread, proto_node));
  Node* x_node = transform_later(new XorXNode(o_node, mark_node));
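  // x_node is zero (ignoring the age bits, which are masked off below)
  // exactly when the mark word equals (thread | Klass::_prototype_header),
  // i.e. when the object is already biased to the current thread with the
  // current epoch.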

  // Get slow path - mark word does NOT match the value.
  Node* not_biased_ctrl = opt_bits_test(ctrl, region, 3, x_node,
                                        (~markOopDesc::age_mask_in_place), 0);
  // region->in(3) is set to fast path - the object is biased to the current thread.
  mem_phi->init_req(3, mem);


  // Mark word does NOT match the value (thread | Klass::_prototype_header).
  // ...
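  // Walk the macro-node list backwards: a successful elimination removes
  // entries from the list, so a decreasing index stays valid.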
    for (int i = C->macro_count(); i > 0; i--) {
      Node * n = C->macro_node(i-1);
      bool success = false;
      debug_only(int old_macro_count = C->macro_count(););
      switch (n->class_id()) {
      case Node::Class_Allocate:
      case Node::Class_AllocateArray:
        success = eliminate_allocate_node(n->as_Allocate());
        break;
      case Node::Class_CallStaticJava:
        success = eliminate_boxing_node(n->as_CallStaticJava());
        break;
      case Node::Class_Lock:
      case Node::Class_Unlock:
        assert(!n->as_AbstractLock()->is_eliminated(), "sanity");
        _has_locks = true;
        break;
      case Node::Class_ArrayCopy:
        break;
      default:
        assert(n->Opcode() == Op_LoopLimit ||
               n->Opcode() == Op_Opaque1 ||
               n->Opcode() == Op_Opaque2 ||
               n->Opcode() == Op_Opaque3, "unknown node type in macro list");
      }
      assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
      progress = progress || success;
    }
  }
}

//------------------------------expand_macro_nodes----------------------
// Returns true if a failure occurred.
bool PhaseMacroExpand::expand_macro_nodes() {
  // Last attempt to eliminate macro nodes.
  eliminate_macro_nodes();

  // Make sure expansion will not cause node limit to be exceeded.
  // Worst case is a macro node gets expanded into about 200 nodes.
  // Allow 50% more for optimization.
  if (C->check_node_count(C->macro_count() * 300, "out of nodes before macro expansion"))
    return true;

  // Eliminate Opaque and LoopLimit nodes. Do it after all loop optimizations.
  bool progress = true;
  while (progress) {
    progress = false;
    for (int i = C->macro_count(); i > 0; i--) {
      Node * n = C->macro_node(i-1);
      bool success = false;
      debug_only(int old_macro_count = C->macro_count(););
      if (n->Opcode() == Op_LoopLimit) {
        // Remove it from macro list and put on IGVN worklist to optimize.
        C->remove_macro_node(n);
        _igvn._worklist.push(n);
        success = true;
      } else if (n->Opcode() == Op_CallStaticJava) {
        // Remove it from macro list and put on IGVN worklist to optimize.
        C->remove_macro_node(n);
        _igvn._worklist.push(n);
        success = true;
      } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
        _igvn.replace_node(n, n->in(1));
        success = true;
#if INCLUDE_RTM_OPT
      } else if ((n->Opcode() == Op_Opaque3) && ((Opaque3Node*)n)->rtm_opt()) {
        assert(C->profile_rtm(), "should be used only in rtm deoptimization code");
        assert((n->outcnt() == 1) && n->unique_out()->is_Cmp(), "");
        Node* cmp = n->unique_out();
#ifdef ASSERT
        // Validate graph.
        assert((cmp->outcnt() == 1) && cmp->unique_out()->is_Bool(), "");
        BoolNode* bol = cmp->unique_out()->as_Bool();
        assert((bol->outcnt() == 1) && bol->unique_out()->is_If() &&
               (bol->_test._test == BoolTest::ne), "");
        IfNode* ifn = bol->unique_out()->as_If();
        assert((ifn->outcnt() == 2) &&
               ifn->proj_out(1)->is_uncommon_trap_proj(Deoptimization::Reason_rtm_state_change) != NULL, "");
#endif
        Node* repl = n->in(1);
        if (!_has_locks) {
          // Remove the RTM state check if there are no locks in the code.
          // Replace the input so the compare sees the same value on both sides.
          repl = (cmp->in(1) == n) ? cmp->in(2) : cmp->in(1);
        }
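        // With 'repl' set to the compare's other input, the CmpNode compares
        // a value with itself, so IGVN folds the branch and removes the RTM
        // state check entirely.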
        _igvn.replace_node(n, repl);