143 add_node_to_connection_graph(n, &delayed_worklist);
144 PointsToNode* ptn = ptnode_adr(n->_idx);
145 if (ptn != NULL && ptn != phantom_obj) {
146 ptnodes_worklist.append(ptn);
147 if (ptn->is_JavaObject()) {
148 java_objects_worklist.append(ptn->as_JavaObject());
149 if ((n->is_Allocate() || n->is_CallStaticJava()) &&
150 (ptn->escape_state() < PointsToNode::GlobalEscape)) {
151 // Only the results of allocations and Java static calls are interesting.
152 non_escaped_worklist.append(ptn->as_JavaObject());
153 }
154 } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
155 oop_fields_worklist.append(ptn->as_Field());
156 }
157 }
158 if (n->is_MergeMem()) {
159 // Collect all MergeMem nodes to add memory slices for
160 // scalar replaceable objects in split_unique_types().
161 _mergemem_worklist.append(n->as_MergeMem());
162 } else if (OptimizePtrCompare && n->is_Cmp() &&
163 (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
164 // Collect pointer compare nodes.
165 ptr_cmp_worklist.append(n);
166 } else if (n->is_MemBarStoreStore()) {
167 // Collect all MemBarStoreStore nodes so that depending on the
168 // escape status of the associated Allocate node, some of them
169 // may be eliminated.
170 storestore_worklist.append(n);
171 } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) &&
172 (n->req() > MemBarNode::Precedent)) {
173 record_for_optimizer(n);
174 #ifdef ASSERT
175 } else if (n->is_AddP()) {
176 // Collect address nodes for graph verification.
177 addp_worklist.append(n);
178 #endif
179 } else if (n->is_ArrayCopy()) {
180 // Keep a list of ArrayCopy nodes so that if one of their inputs is
181 // non-escaping, we can record a unique type
182 arraycopy_worklist.append(n->as_ArrayCopy());
183 }
184 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
185 Node* m = n->fast_out(i); // Get user
186 ideal_nodes.push(m);
187 }
188 }
189 if (non_escaped_worklist.length() == 0) {
190 _collecting = false;
191 return false; // Nothing to do.
362 if (name != NULL && strcmp(name, "uncommon_trap") == 0)
363 return; // Skip uncommon traps
364 }
365 // Don't mark as processed since call's arguments have to be processed.
366 delayed_worklist->push(n);
367 // Check if a call returns an object.
368 if ((n->as_Call()->returns_pointer() &&
369 n->as_Call()->proj_out(TypeFunc::Parms) != NULL) ||
370 (n->is_CallStaticJava() &&
371 n->as_CallStaticJava()->is_boxing_method())) {
372 add_call_node(n->as_Call());
373 }
374 }
375 return;
376 }
377 // Put this check here to process call arguments since some call nodes
378 // point to phantom_obj.
379 if (n_ptn == phantom_obj || n_ptn == null_obj)
380 return; // Skip predefined nodes.
381
382 int opcode = n->Opcode();
383 switch (opcode) {
384 case Op_AddP: {
385 Node* base = get_addp_base(n);
386 PointsToNode* ptn_base = ptnode_adr(base->_idx);
387 // Field nodes are created for all field types. They are used in
388 // adjust_scalar_replaceable_state() and split_unique_types().
389 // Note, non-oop fields will have only base edges in the Connection
390 // Graph because such fields are not used for oop loads and stores.
391 int offset = address_offset(n, igvn);
392 add_field(n, PointsToNode::NoEscape, offset);
393 if (ptn_base == NULL) {
394 delayed_worklist->push(n); // Process it later.
395 } else {
396 n_ptn = ptnode_adr(n_idx);
397 add_base(n_ptn->as_Field(), ptn_base);
398 }
399 break;
400 }
401 case Op_CastX2P: {
402 map_ideal_node(n, phantom_obj);
403 break;
404 }
405 case Op_CastPP:
406 case Op_CheckCastPP:
407 case Op_EncodeP:
408 case Op_DecodeN:
409 case Op_EncodePKlass:
410 case Op_DecodeNKlass: {
411 add_local_var_and_edge(n, PointsToNode::NoEscape,
412 n->in(1), delayed_worklist);
413 break;
414 }
415 case Op_CMoveP: {
416 add_local_var(n, PointsToNode::NoEscape);
417 // Do not add edges during the first iteration because some inputs
418 // may not be defined yet.
419 delayed_worklist->push(n);
420 break;
421 }
422 case Op_ConP:
423 case Op_ConN:
424 case Op_ConNKlass: {
425 // assume all oop constants globally escape except for null
426 PointsToNode::EscapeState es;
427 const Type* t = igvn->type(n);
428 if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
429 es = PointsToNode::NoEscape;
430 } else {
431 es = PointsToNode::GlobalEscape;
432 }
433 add_java_object(n, es);
434 break;
435 }
436 case Op_CreateEx: {
437 // assume that all exception objects globally escape
438 map_ideal_node(n, phantom_obj);
439 break;
440 }
441 case Op_LoadKlass:
442 case Op_LoadNKlass: {
443 // Unknown class is loaded
444 map_ideal_node(n, phantom_obj);
445 break;
446 }
447 case Op_LoadP:
448 case Op_LoadN:
449 case Op_LoadPLocked: {
450 add_objload_to_connection_graph(n, delayed_worklist);
451 break;
452 }
453 case Op_Parm: {
454 map_ideal_node(n, phantom_obj);
455 break;
456 }
457 case Op_PartialSubtypeCheck: {
458 // Produces Null or notNull and is used only in CmpP, so
459 // phantom_obj can be used.
460 map_ideal_node(n, phantom_obj); // Result is unknown
461 break;
462 }
463 case Op_Phi: {
464 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
465 // ThreadLocal has RawPtr type.
466 const Type* t = n->as_Phi()->type();
467 if (t->make_ptr() != NULL) {
468 add_local_var(n, PointsToNode::NoEscape);
469 // Do not add edges during the first iteration because some inputs
470 // may not be defined yet.
471 delayed_worklist->push(n);
472 }
473 break;
474 }
475 case Op_Proj: {
476 // we are only interested in the oop result projection from a call
477 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
478 n->in(0)->as_Call()->returns_pointer()) {
479 add_local_var_and_edge(n, PointsToNode::NoEscape,
480 n->in(0), delayed_worklist);
481 }
482 break;
483 }
484 case Op_Rethrow: // Exception object escapes
485 case Op_Return: {
486 if (n->req() > TypeFunc::Parms &&
487 igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
488 // Treat Return value as LocalVar with GlobalEscape escape state.
489 add_local_var_and_edge(n, PointsToNode::GlobalEscape,
490 n->in(TypeFunc::Parms), delayed_worklist);
491 }
492 break;
493 }
494 case Op_CompareAndExchangeP:
495 case Op_CompareAndExchangeN:
496 case Op_GetAndSetP:
497 case Op_GetAndSetN: {
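// These atomics both load the old value and store a new one, so record
// the load part here and fall through to the store handling below.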
498 add_objload_to_connection_graph(n, delayed_worklist);
499 // fallthrough
500 }
501 case Op_StoreP:
502 case Op_StoreN:
503 case Op_StoreNKlass:
504 case Op_StorePConditional:
505 case Op_WeakCompareAndSwapP:
506 case Op_WeakCompareAndSwapN:
507 case Op_CompareAndSwapP:
508 case Op_CompareAndSwapN: {
509 Node* adr = n->in(MemNode::Address);
510 const Type *adr_type = igvn->type(adr);
511 adr_type = adr_type->make_ptr();
512 if (adr_type == NULL) {
513 break; // skip dead nodes
514 }
515 if (adr_type->isa_oopptr() ||
516 ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) &&
517 (adr_type == TypeRawPtr::NOTNULL &&
518 adr->in(AddPNode::Address)->is_Proj() &&
519 adr->in(AddPNode::Address)->in(0)->is_Allocate()))) {
520 delayed_worklist->push(n); // Process it later.
521 #ifdef ASSERT
522 assert(adr->is_AddP(), "expecting an AddP");
523 if (adr_type == TypeRawPtr::NOTNULL) {
524 // Verify a raw address for a store captured by Initialize node.
525 int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
526 assert(offs != Type::OffsetBot, "offset must be a constant");
527 }
528 #endif
529 } else {
530 // Ignore the copy of the displaced header to the BoxNode (OSR compilation).
531 if (adr->is_BoxLock())
532 break;
533 // Stored value escapes in unsafe access.
534 if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
535 // Pointer stores in G1 barriers look like unsafe accesses.
536 // Ignore such stores so that non-escaping allocations can still
537 // be scalar replaced.
538 if (UseG1GC && adr->is_AddP()) {
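// G1 barriers enqueue values into thread-local queues; the store has the
// shape StoreP(AddP(LoadP(AddP(ThreadLocal, queue_buf_offset)), ...), value).
// Walk back from the store address to the ThreadLocal base and check the
// queue buffer offset to recognize such stores.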
539 Node* base = get_addp_base(adr);
540 if (base->Opcode() == Op_LoadP &&
541 base->in(MemNode::Address)->is_AddP()) {
542 adr = base->in(MemNode::Address);
543 Node* tls = get_addp_base(adr);
544 if (tls->Opcode() == Op_ThreadLocal) {
545 int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
546 if (offs == in_bytes(JavaThread::satb_mark_queue_offset() +
547 SATBMarkQueue::byte_offset_of_buf())) {
548 break; // G1 pre barrier previous oop value store.
549 }
550 if (offs == in_bytes(JavaThread::dirty_card_queue_offset() +
551 DirtyCardQueue::byte_offset_of_buf())) {
552 break; // G1 post barrier card address store.
553 }
554 }
555 }
556 }
557 delayed_worklist->push(n); // Process unsafe access later.
558 break;
559 }
560 #ifdef ASSERT
561 n->dump(1);
562 assert(false, "not unsafe or G1 barrier raw StoreP");
563 #endif
564 }
565 break;
566 }
567 case Op_AryEq:
568 case Op_HasNegatives:
569 case Op_StrComp:
570 case Op_StrEquals:
571 case Op_StrIndexOf:
572 case Op_StrIndexOfChar:
573 case Op_StrInflatedCopy:
574 case Op_StrCompressedCopy:
575 case Op_EncodeISOArray: {
576 add_local_var(n, PointsToNode::ArgEscape);
577 delayed_worklist->push(n); // Process it later.
578 break;
579 }
580 case Op_ThreadLocal: {
581 add_java_object(n, PointsToNode::ArgEscape);
582 break;
583 }
584 default:
585 ; // Do nothing for nodes not related to EA.
586 }
587 return;
588 }
589
590 #ifdef ASSERT
591 #define ELSE_FAIL(name) \
592 /* Should not be reached for a non-pointer type. */ \
593 n->dump(1); \
594 assert(false, name); \
595 break;
596 #else
597 #define ELSE_FAIL(name) \
598 break;
599 #endif
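// ELSE_FAIL(name) is used by the switch in add_final_edges() below when a
// case falls through without taking its expected pointer path: debug
// builds dump the node and assert, product builds simply break.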
600
601 // Add final simple edges to graph.
602 void ConnectionGraph::add_final_edges(Node *n) {
603 PointsToNode* n_ptn = ptnode_adr(n->_idx);
604 #ifdef ASSERT
605 if (_verify && n_ptn->is_JavaObject())
606 return; // This method does not change graph for JavaObject.
607 #endif
608
609 if (n->is_Call()) {
610 process_call_arguments(n->as_Call());
611 return;
612 }
613 assert(n->is_Store() || n->is_LoadStore() ||
614 ((n_ptn != NULL) && (n_ptn->ideal_node() != NULL)),
615 "node should be registered already");
616 int opcode = n->Opcode();
617 switch (opcode) {
618 case Op_AddP: {
619 Node* base = get_addp_base(n);
620 PointsToNode* ptn_base = ptnode_adr(base->_idx);
621 assert(ptn_base != NULL, "field's base should be registered");
622 add_base(n_ptn->as_Field(), ptn_base);
623 break;
624 }
625 case Op_CastPP:
626 case Op_CheckCastPP:
627 case Op_EncodeP:
628 case Op_DecodeN:
629 case Op_EncodePKlass:
630 case Op_DecodeNKlass: {
631 add_local_var_and_edge(n, PointsToNode::NoEscape,
632 n->in(1), NULL);
633 break;
634 }
635 case Op_CMoveP: {
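// A CMoveP selects one of its pointer inputs at runtime, so the result
// must conservatively point to everything each input points to.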
636 for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
637 Node* in = n->in(i);
638 if (in == NULL)
639 continue; // ignore NULL
640 Node* uncast_in = in->uncast();
641 if (uncast_in->is_top() || uncast_in == n)
642 continue; // ignore top or inputs which go back to this node
643 PointsToNode* ptn = ptnode_adr(in->_idx);
644 assert(ptn != NULL, "node should be registered");
645 add_edge(n_ptn, ptn);
646 }
647 break;
648 }
649 case Op_LoadP:
650 case Op_LoadN:
651 case Op_LoadPLocked: {
652 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
653 // ThreadLocal has RawPtr type.
654 const Type* t = _igvn->type(n);
655 if (t->make_ptr() != NULL) {
656 Node* adr = n->in(MemNode::Address);
657 add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
658 break;
659 }
660 ELSE_FAIL("Op_LoadP");
661 }
662 case Op_Phi: {
663 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
664 // ThreadLocal has RawPtr type.
665 const Type* t = n->as_Phi()->type();
666 if (t->make_ptr() != NULL) {
667 for (uint i = 1; i < n->req(); i++) {
668 Node* in = n->in(i);
669 if (in == NULL)
670 continue; // ignore NULL
671 Node* uncast_in = in->uncast();
672 if (uncast_in->is_top() || uncast_in == n)
673 continue; // ignore top or inputs which go back to this node
674 PointsToNode* ptn = ptnode_adr(in->_idx);
675 assert(ptn != NULL, "node should be registered");
676 add_edge(n_ptn, ptn);
677 }
678 break;
679 }
680 ELSE_FAIL("Op_Phi");
681 }
682 case Op_Proj: {
683 // we are only interested in the oop result projection from a call
684 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
685 n->in(0)->as_Call()->returns_pointer()) {
686 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
687 break;
688 }
689 ELSE_FAIL("Op_Proj");
690 }
691 case Op_Rethrow: // Exception object escapes
692 case Op_Return: {
693 if (n->req() > TypeFunc::Parms &&
694 _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
695 // Treat Return value as LocalVar with GlobalEscape escape state.
696 add_local_var_and_edge(n, PointsToNode::GlobalEscape,
697 n->in(TypeFunc::Parms), NULL);
698 break;
699 }
700 ELSE_FAIL("Op_Return");
701 }
702 case Op_StoreP:
703 case Op_StoreN:
704 case Op_StoreNKlass:
705 case Op_StorePConditional:
706 case Op_CompareAndExchangeP:
707 case Op_CompareAndExchangeN:
708 case Op_CompareAndSwapP:
709 case Op_CompareAndSwapN:
710 case Op_WeakCompareAndSwapP:
711 case Op_WeakCompareAndSwapN:
712 case Op_GetAndSetP:
713 case Op_GetAndSetN: {
714 Node* adr = n->in(MemNode::Address);
715 const Type *adr_type = _igvn->type(adr);
716 adr_type = adr_type->make_ptr();
717 #ifdef ASSERT
718 if (adr_type == NULL) {
719 n->dump(1);
720 assert(adr_type != NULL, "dead node should not be on list");
721 break;
722 }
723 #endif
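// GetAndSet and CompareAndExchange also return the old value, so model
// the result as a LocalVar with an edge to the field at the address.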
724 if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||
725 opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) {
726 add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
727 }
728 if (adr_type->isa_oopptr() ||
729 ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) &&
730 (adr_type == TypeRawPtr::NOTNULL &&
731 adr->in(AddPNode::Address)->is_Proj() &&
732 adr->in(AddPNode::Address)->in(0)->is_Allocate()))) {
733 // Point Address to Value
734 PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
735 assert(adr_ptn != NULL &&
736 adr_ptn->as_Field()->is_oop(), "node should be registered");
737 Node *val = n->in(MemNode::ValueIn);
738 PointsToNode* ptn = ptnode_adr(val->_idx);
739 assert(ptn != NULL, "node should be registered");
740 add_edge(adr_ptn, ptn);
741 break;
742 } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
743 // Stored value escapes in unsafe access.
744 Node *val = n->in(MemNode::ValueIn);
745 PointsToNode* ptn = ptnode_adr(val->_idx);
746 assert(ptn != NULL, "node should be registered");
747 set_escape_state(ptn, PointsToNode::GlobalEscape);
748 // Add edge to object for unsafe access with offset.
749 PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
750 assert(adr_ptn != NULL, "node should be registered");
751 if (adr_ptn->is_Field()) {
752 assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
753 add_edge(adr_ptn, ptn);
754 }
755 break;
756 }
757 ELSE_FAIL("Op_StoreP");
758 }
759 case Op_AryEq:
760 case Op_HasNegatives:
761 case Op_StrComp:
762 case Op_StrEquals:
763 case Op_StrIndexOf:
764 case Op_StrIndexOfChar:
765 case Op_StrInflatedCopy:
766 case Op_StrCompressedCopy:
767 case Op_EncodeISOArray: {
768 // char[]/byte[] arrays passed to string intrinsics do not escape, but
769 // they are not scalar replaceable. Adjust their escape state.
770 // Start from the in(2) edge since in(1) is the memory edge.
771 for (uint i = 2; i < n->req(); i++) {
772 Node* adr = n->in(i);
773 const Type* at = _igvn->type(adr);
774 if (!adr->is_top() && at->isa_ptr()) {
775 assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
776 at->isa_ptr() != NULL, "expecting a pointer");
777 if (adr->is_AddP()) {
778 adr = get_addp_base(adr);
779 }
780 PointsToNode* ptn = ptnode_adr(adr->_idx);
781 assert(ptn != NULL, "node should be registered");
782 add_edge(n_ptn, ptn);
783 }
784 }
785 break;
786 }
787 default: {
887 const TypeTuple* d = call->tf()->domain();
888 bool ret_arg = false;
889 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
890 if (d->field_at(i)->isa_ptr() != NULL &&
891 call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
892 ret_arg = true;
893 break;
894 }
895 }
896 if (ret_arg) {
897 add_local_var(call, PointsToNode::ArgEscape);
898 } else {
899 // Returns unknown object.
900 map_ideal_node(call, phantom_obj);
901 }
902 }
903 }
904 } else {
905 // Another type of call; assume the worst case:
906 // the returned value is unknown and globally escapes.
907 assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
908 map_ideal_node(call, phantom_obj);
909 }
910 }
911
912 void ConnectionGraph::process_call_arguments(CallNode *call) {
913 bool is_arraycopy = false;
914 switch (call->Opcode()) {
915 #ifdef ASSERT
916 case Op_Allocate:
917 case Op_AllocateArray:
918 case Op_Lock:
919 case Op_Unlock:
920 assert(false, "should be done already");
921 break;
922 #endif
923 case Op_ArrayCopy:
924 case Op_CallLeafNoFP:
925 // Most array copies are ArrayCopy nodes at this point but there
926 // are still a few direct calls to the copy subroutines (See
927 // PhaseStringOpts::copy_string())
928 is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
929 call->as_CallLeaf()->is_call_to_arraycopystub();
930 // fall through
931 case Op_CallLeaf: {
932 // Stub calls: objects do not escape but they are not scalar replaceable.
933 // Adjust escape state for outgoing arguments.
934 const TypeTuple* d = call->tf()->domain();
935 bool src_has_oops = false;
936 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
937 const Type* at = d->field_at(i);
938 Node *arg = call->in(i);
939 if (arg == NULL) {
940 continue;
941 }
942 const Type *aat = _igvn->type(arg);
943 if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr())
944 continue;
945 if (arg->is_AddP()) {
946 //
947 // The inline_native_clone() case when the arraycopy stub is called
948 // after the allocation before Initialize and CheckCastPP nodes.
949 // Or normal arraycopy for object arrays case.
950 //
951 // Set AddP's base (Allocate) as not scalar replaceable since
1027 if (arg_is_arraycopy_dest) {
1028 Node* src = call->in(TypeFunc::Parms);
1029 if (src->is_AddP()) {
1030 src = get_addp_base(src);
1031 }
1032 PointsToNode* src_ptn = ptnode_adr(src->_idx);
1033 assert(src_ptn != NULL, "should be registered");
1034 if (arg_ptn != src_ptn) {
1035 // Special arraycopy edge:
1036 // A destination object's field can't have the source object
1037 // as its base since the objects' escape states are not related.
1038 // Only the escape state of the destination object's fields affects
1039 // the escape state of the fields in the source object.
1040 add_arraycopy(call, es, src_ptn, arg_ptn);
1041 }
1042 }
1043 }
1044 }
1045 break;
1046 }
1047 case Op_CallStaticJava: {
1048 // For a static call, we know exactly what method is being called.
1049 // Use the bytecode estimator to record the call's escape effects.
1050 #ifdef ASSERT
1051 const char* name = call->as_CallStaticJava()->_name;
1052 assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
1053 #endif
1054 ciMethod* meth = call->as_CallJava()->method();
1055 if ((meth != NULL) && meth->is_boxing_method()) {
1056 break; // Boxing methods do not modify any oops.
1057 }
1058 BCEscapeAnalyzer* call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
1059 // fall-through if not a Java method or no analyzer information
1060 if (call_analyzer != NULL) {
1061 PointsToNode* call_ptn = ptnode_adr(call->_idx);
1062 const TypeTuple* d = call->tf()->domain();
1063 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1064 const Type* at = d->field_at(i);
1065 int k = i - TypeFunc::Parms;
1066 Node* arg = call->in(i);
1067 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1855 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
1856 #endif
1857 alock->set_non_esc_obj();
1858 }
1859 }
1860 }
1861 }
1862 }
1863
1864 if (OptimizePtrCompare) {
1865 // Add ConI(#CC_GT) and ConI(#CC_EQ).
1866 _pcmp_neq = igvn->makecon(TypeInt::CC_GT);
1867 _pcmp_eq = igvn->makecon(TypeInt::CC_EQ);
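// optimize_ptr_compare() returns one of these constants (or NULL) so a
// CmpP/CmpN whose operands are provably distinct or identical objects
// can be replaced by a known result.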
1868 // Optimize object comparisons.
1869 while (ptr_cmp_worklist.length() != 0) {
1870 Node *n = ptr_cmp_worklist.pop();
1871 Node *res = optimize_ptr_compare(n);
1872 if (res != NULL) {
1873 #ifndef PRODUCT
1874 if (PrintOptimizePtrCompare) {
1875 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ"));
1876 if (Verbose) {
1877 n->dump(1);
1878 }
1879 }
1880 #endif
1881 igvn->replace_node(n, res);
1882 }
1883 }
1884 // cleanup
1885 if (_pcmp_neq->outcnt() == 0)
1886 igvn->hash_delete(_pcmp_neq);
1887 if (_pcmp_eq->outcnt() == 0)
1888 igvn->hash_delete(_pcmp_eq);
1889 }
1890
1891 // For MemBarStoreStore nodes added in library_call.cpp, check
1892 // the escape status of the associated AllocateNode and optimize out the
1893 // MemBarStoreStore node if the allocated object never escapes.
1894 while (storestore_worklist.length() != 0) {
1895 Node *n = storestore_worklist.pop();
1896 MemBarStoreStoreNode *storestore = n->as_MemBarStoreStore();
1897 Node *alloc = storestore->in(MemBarNode::Precedent)->in(0);
1898 assert (alloc->is_Allocate(), "storestore should point to AllocateNode");
1899 if (not_global_escape(alloc)) {
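// The allocated object never escapes, so no other thread can observe it
// during initialization and the StoreStore barrier can be relaxed to a
// plain CPU-order membar.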
1900 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
1901 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
1902 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
1903 igvn->register_new_node_with_optimizer(mb);
1904 igvn->replace_node(storestore, mb);
1905 }
1906 }
1907 }
1908
1909 // Optimize object comparisons.
1910 Node* ConnectionGraph::optimize_ptr_compare(Node* n) {
1911 assert(OptimizePtrCompare, "sanity");
1912 PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
1913 PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
1914 JavaObjectNode* jobj1 = unique_java_object(n->in(1));
1915 JavaObjectNode* jobj2 = unique_java_object(n->in(2));
1916 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
1917 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
1918
1919 // Check simple cases first.
1920 if (jobj1 != NULL) {
2050 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
2051 const Type* adr_type = n->as_AddP()->bottom_type();
2052 BasicType bt = T_INT;
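// Start with a non-oop type; the checks below promote bt to an oop type
// (T_OBJECT/T_NARROWOOP/T_ARRAY) only when the address can reference an
// oop field or an array element.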
2053 if (offset == Type::OffsetBot) {
2054 // Check only oop fields.
2055 if (!adr_type->isa_aryptr() ||
2056 (adr_type->isa_aryptr()->klass() == NULL) ||
2057 adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
2058 // OffsetBot is used to reference an array's element. Ignore the first AddP.
2059 if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
2060 bt = T_OBJECT;
2061 }
2062 }
2063 } else if (offset != oopDesc::klass_offset_in_bytes()) {
2064 if (adr_type->isa_instptr()) {
2065 ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
2066 if (field != NULL) {
2067 bt = field->layout_type();
2068 } else {
2069 // Check for unsafe oop field access
2070 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN)) {
2071 bt = T_OBJECT;
2072 (*unsafe) = true;
2073 }
2074 }
2075 } else if (adr_type->isa_aryptr()) {
2076 if (offset == arrayOopDesc::length_offset_in_bytes()) {
2077 // Ignore array length load.
2078 } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
2079 // Ignore first AddP.
2080 } else {
2081 const Type* elemtype = adr_type->isa_aryptr()->elem();
2082 bt = elemtype->array_element_basic_type();
2083 }
2084 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2085 // Allocation initialization, ThreadLocal field access, unsafe access
2086 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN)) {
2087 bt = T_OBJECT;
2088 }
2089 }
2090 }
2091 return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);
2092 }
2093
2094 // Returns the unique Java object pointed to, or NULL.
2095 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
2096 assert(!_collecting, "should not be called when graph is constructed");
2097 // If the node was created after the escape computation we can't answer.
2098 uint idx = n->_idx;
2099 if (idx >= nodes_size()) {
2100 return NULL;
2101 }
2102 PointsToNode* ptn = ptnode_adr(idx);
2103 if (ptn->is_JavaObject()) {
2104 return ptn->as_JavaObject();
2105 }
2106 assert(ptn->is_LocalVar(), "sanity");
2290 // LoadKlass
2291 // | |
2292 // AddP ( base == address )
2293 //
2294 // case #8. narrow Klass's field reference.
2295 // LoadNKlass
2296 // |
2297 // DecodeN
2298 // | |
2299 // AddP ( base == address )
2300 //
2301 Node *base = addp->in(AddPNode::Base);
2302 if (base->uncast()->is_top()) { // The AddP case #3 and #6.
2303 base = addp->in(AddPNode::Address);
2304 while (base->is_AddP()) {
2305 // Case #6 (unsafe access) may have several chained AddP nodes.
2306 assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
2307 base = base->in(AddPNode::Address);
2308 }
2309 Node* uncast_base = base->uncast();
2310 int opcode = uncast_base->Opcode();
2311 assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
2312 opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
2313 (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
2314 (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
2315 }
2316 return base;
2317 }
2318
2319 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
2320 assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
2321 Node* addp2 = addp->raw_out(0);
2322 if (addp->outcnt() == 1 && addp2->is_AddP() &&
2323 addp2->in(AddPNode::Base) == n &&
2324 addp2->in(AddPNode::Address) == addp) {
2325 assert(addp->in(AddPNode::Base) == n, "expecting the same base");
2326 //
2327 // Find the array's offset to push it on the worklist first and
2328 // as a result process the array's element offset first (pushed second)
2329 // to avoid a CastPP for the array's offset.
2330 // Otherwise the inserted CastPP (LocalVar) would point to what
2331 // the AddP (Field) points to, which would be wrong since
2332 // the algorithm expects the CastPP has the same point as
2604 // Don't move related membars.
2605 record_for_optimizer(use);
2606 continue;
2607 }
2608 tp = use->as_MemBar()->adr_type()->isa_ptr();
2609 if ((tp != NULL && C->get_alias_index(tp) == alias_idx) ||
2610 alias_idx == general_idx) {
2611 continue; // Nothing to do
2612 }
2613 // Move to general memory slice.
2614 uint orig_uniq = C->unique();
2615 Node* m = find_inst_mem(n, general_idx, orig_phis);
2616 assert(orig_uniq == C->unique(), "no new nodes");
2617 igvn->hash_delete(use);
2618 imax -= use->replace_edge(n, m);
2619 igvn->hash_insert(use);
2620 record_for_optimizer(use);
2621 --i;
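// replace_edge() removed this use from n's out-list; imax was reduced
// accordingly and i steps back so that no remaining user is skipped.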
2622 #ifdef ASSERT
2623 } else if (use->is_Mem()) {
2624 if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) {
2625 // Don't move related cardmark.
2626 continue;
2627 }
2628 // Memory nodes should have new memory input.
2629 tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
2630 assert(tp != NULL, "ptr type");
2631 int idx = C->get_alias_index(tp);
2632 assert(get_map(use->_idx) != NULL || idx == alias_idx,
2633 "Following memory nodes should have new memory input or be on the same memory slice");
2634 } else if (use->is_Phi()) {
2635 // Phi nodes should be split and moved already.
2636 tp = use->as_Phi()->adr_type()->isa_ptr();
2637 assert(tp != NULL, "ptr type");
2638 int idx = C->get_alias_index(tp);
2639 assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
2640 } else {
2641 use->dump();
2642 assert(false, "should not be here");
2643 #endif
2644 }
2720 return NULL;
2721 }
2722 mmem->set_memory_at(alias_idx, result);
2723 }
2724 } else if (result->is_Phi() &&
2725 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
2726 Node *un = result->as_Phi()->unique_input(igvn);
2727 if (un != NULL) {
2728 orig_phis.append_if_missing(result->as_Phi());
2729 result = un;
2730 } else {
2731 break;
2732 }
2733 } else if (result->is_ClearArray()) {
2734 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
2735 // Cannot bypass the initialization of the instance
2736 // we are looking for.
2737 break;
2738 }
2739 // Otherwise skip it (the call updated 'result' value).
2740 } else if (result->Opcode() == Op_SCMemProj) {
2741 Node* mem = result->in(0);
2742 Node* adr = NULL;
2743 if (mem->is_LoadStore()) {
2744 adr = mem->in(MemNode::Address);
2745 } else {
2746 assert(mem->Opcode() == Op_EncodeISOArray ||
2747 mem->Opcode() == Op_StrCompressedCopy, "sanity");
2748 adr = mem->in(3); // Memory edge corresponds to destination array
2749 }
2750 const Type *at = igvn->type(adr);
2751 if (at != Type::TOP) {
2752 assert(at->isa_ptr() != NULL, "pointer type required.");
2753 int idx = C->get_alias_index(at->is_ptr());
2754 if (idx == alias_idx) {
2755 // Assert in debug mode
2756 assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
2757 break; // In product mode return SCMemProj node
2758 }
2759 }
2760 result = mem->in(MemNode::Memory);
2761 } else if (result->Opcode() == Op_StrInflatedCopy) {
2762 Node* adr = result->in(3); // Memory edge corresponds to destination array
2763 const Type *at = igvn->type(adr);
2764 if (at != Type::TOP) {
2765 assert(at->isa_ptr() != NULL, "pointer type required.");
2766 int idx = C->get_alias_index(at->is_ptr());
2767 if (idx == alias_idx) {
2768 // Assert in debug mode
2769 assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
2770 break; // In product mode return SCMemProj node
2771 }
2772 }
2773 result = result->in(MemNode::Memory);
2774 }
2775 }
2776 if (result->is_Phi()) {
2777 PhiNode *mphi = result->as_Phi();
2778 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
2779 const TypePtr *t = mphi->adr_type();
2780 if (!is_instance) {
2781 // Push all non-instance Phis on the orig_phis worklist to update inputs
3026 }
3027 }
3028 }
3029 } else if (n->is_AddP()) {
3030 JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
3031 if (jobj == NULL || jobj == phantom_obj) {
3032 #ifdef ASSERT
3033 ptnode_adr(get_addp_base(n)->_idx)->dump();
3034 ptnode_adr(n->_idx)->dump();
3035 assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
3036 #endif
3037 _compile->record_failure(C2Compiler::retry_no_escape_analysis());
3038 return;
3039 }
3040 Node *base = get_map(jobj->idx()); // CheckCastPP node
3041 if (!split_AddP(n, base)) continue; // wrong type from dead path
3042 } else if (n->is_Phi() ||
3043 n->is_CheckCastPP() ||
3044 n->is_EncodeP() ||
3045 n->is_DecodeN() ||
3046 (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
3047 if (visited.test_set(n->_idx)) {
3048 assert(n->is_Phi(), "loops only through Phi's");
3049 continue; // already processed
3050 }
3051 JavaObjectNode* jobj = unique_java_object(n);
3052 if (jobj == NULL || jobj == phantom_obj) {
3053 #ifdef ASSERT
3054 ptnode_adr(n->_idx)->dump();
3055 assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
3056 #endif
3057 _compile->record_failure(C2Compiler::retry_no_escape_analysis());
3058 return;
3059 } else {
3060 Node *val = get_map(jobj->idx()); // CheckCastPP node
3061 TypeNode *tn = n->as_Type();
3062 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
3063 assert(tinst != NULL && tinst->is_known_instance() &&
3064 tinst->instance_id() == jobj->idx(), "instance type expected.");
3065
3066 const Type *tn_type = igvn->type(tn);
3096 // push allocation's users on appropriate worklist
3097 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3098 Node *use = n->fast_out(i);
3099 if (use->is_Mem() && use->in(MemNode::Address) == n) {
3100 // Load/store to instance's field
3101 memnode_worklist.append_if_missing(use);
3102 } else if (use->is_MemBar()) {
3103 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3104 memnode_worklist.append_if_missing(use);
3105 }
3106 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
3107 Node* addp2 = find_second_addp(use, n);
3108 if (addp2 != NULL) {
3109 alloc_worklist.append_if_missing(addp2);
3110 }
3111 alloc_worklist.append_if_missing(use);
3112 } else if (use->is_Phi() ||
3113 use->is_CheckCastPP() ||
3114 use->is_EncodeNarrowPtr() ||
3115 use->is_DecodeNarrowPtr() ||
3116 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
3117 alloc_worklist.append_if_missing(use);
3118 #ifdef ASSERT
3119 } else if (use->is_Mem()) {
3120 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3121 } else if (use->is_MergeMem()) {
3122 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3123 } else if (use->is_SafePoint()) {
3124 // Look for MergeMem nodes for calls which reference unique allocation
3125 // (through CheckCastPP nodes) even for debug info.
3126 Node* m = use->in(TypeFunc::Memory);
3127 if (m->is_MergeMem()) {
3128 assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3129 }
3130 } else if (use->Opcode() == Op_EncodeISOArray) {
3131 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3132 // EncodeISOArray overwrites destination array
3133 memnode_worklist.append_if_missing(use);
3134 }
3135 } else {
3136 uint op = use->Opcode();
3137 if ((use->in(MemNode::Memory) == n) &&
3138 (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
3139 // They overwrite the memory edge corresponding to the destination array.
3140 memnode_worklist.append_if_missing(use);
3141 } else if (!(op == Op_CmpP || op == Op_Conv2B ||
3142 op == Op_CastP2X || op == Op_StoreCM ||
3143 op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
3144 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3145 op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
3146 n->dump();
3147 use->dump();
3148 assert(false, "EA: missing allocation reference path");
3149 }
3150 #endif
3151 }
3152 }
3153
3154 }
3155
3156 // Go over all ArrayCopy nodes and if one of the inputs has a unique
3157 // type, record it in the ArrayCopy node so we know what memory this
3158 // node uses/modifies.
3159 for (int next = 0; next < arraycopy_worklist.length(); next++) {
3160 ArrayCopyNode* ac = arraycopy_worklist.at(next);
3161 Node* dest = ac->in(ArrayCopyNode::Dest);
3162 if (dest->is_AddP()) {
3163 dest = get_addp_base(dest);
3164 }
3165 JavaObjectNode* jobj = unique_java_object(dest);
3187 // New alias types were created in split_AddP().
3188 uint new_index_end = (uint) _compile->num_alias_types();
3189 assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");
3190
3191 // Phase 2: Process MemNodes from memnode_worklist. Compute new address types
3192 // and new values for Memory inputs (the Memory inputs are not
3193 // actually updated until phase 4).
3194 if (memnode_worklist.length() == 0)
3195 return; // nothing to do
3196 while (memnode_worklist.length() != 0) {
3197 Node *n = memnode_worklist.pop();
3198 if (visited.test_set(n->_idx))
3199 continue;
3200 if (n->is_Phi() || n->is_ClearArray()) {
3201 // we don't need to do anything, but the users must be pushed
3202 } else if (n->is_MemBar()) { // Initialize, MemBar nodes
3203 // we don't need to do anything, but the users must be pushed
3204 n = n->as_MemBar()->proj_out(TypeFunc::Memory);
3205 if (n == NULL)
3206 continue;
3207 } else if (n->Opcode() == Op_StrCompressedCopy ||
3208 n->Opcode() == Op_EncodeISOArray) {
3209 // get the memory projection
3210 n = n->find_out_with(Op_SCMemProj);
3211 assert(n->Opcode() == Op_SCMemProj, "memory projection required");
3212 } else {
3213 assert(n->is_Mem(), "memory node required.");
3214 Node *addr = n->in(MemNode::Address);
3215 const Type *addr_t = igvn->type(addr);
3216 if (addr_t == Type::TOP)
3217 continue;
3218 assert (addr_t->isa_ptr() != NULL, "pointer type required.");
3219 int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
3220 assert ((uint)alias_idx < new_index_end, "wrong alias index");
3221 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
3222 if (_compile->failing()) {
3223 return;
3224 }
3225 if (mem != n->in(MemNode::Memory)) {
3226 // We delay the memory edge update since we need the old one in the
3227 // MergeMem code below when instance memory slices are separated.
3228 set_map(n, mem);
3229 }
3230 if (n->is_Load()) {
3231 continue; // don't push users
3232 } else if (n->is_LoadStore()) {
3233 // get the memory projection
3234 n = n->find_out_with(Op_SCMemProj);
3235 assert(n->Opcode() == Op_SCMemProj, "memory projection required");
3236 }
3237 }
3238 // push user on appropriate worklist
3239 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3240 Node *use = n->fast_out(i);
3241 if (use->is_Phi() || use->is_ClearArray()) {
3242 memnode_worklist.append_if_missing(use);
3243 } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
3244 if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
3245 continue;
3246 memnode_worklist.append_if_missing(use);
3247 } else if (use->is_MemBar()) {
3248 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3249 memnode_worklist.append_if_missing(use);
3250 }
3251 #ifdef ASSERT
3252 } else if (use->is_Mem()) {
3253 assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
3254 } else if (use->is_MergeMem()) {
3255 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3256 } else if (use->Opcode() == Op_EncodeISOArray) {
3257 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3258 // EncodeISOArray overwrites destination array
3259 memnode_worklist.append_if_missing(use);
3260 }
3261 } else {
3262 uint op = use->Opcode();
3263 if ((use->in(MemNode::Memory) == n) &&
3264 (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
3265 // They overwrite the memory edge corresponding to the destination array.
3266 memnode_worklist.append_if_missing(use);
3267 } else if (!(op == Op_StoreCM ||
3268 (op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL &&
3269 strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) ||
3270 op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
3271 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3272 op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
3273 n->dump();
3274 use->dump();
3275 assert(false, "EA: missing memory path");
3276 }
3277 #endif
3278 }
3279 }
3280 }
3281
3282 // Phase 3: Process MergeMem nodes from mergemem_worklist.
3283 // Walk each memory slice moving the first node encountered of each
3284 // instance type to the input corresponding to its alias index.
3285 uint length = _mergemem_worklist.length();
3286 for (uint next = 0; next < length; ++next) {
3287 MergeMemNode* nmm = _mergemem_worklist.at(next);
3288 assert(!visited.test_set(nmm->_idx), "should not be visited before");
3289 // Note: we don't want to use MergeMemStream here because we only want to
3290 // scan inputs which exist at the start, not ones we add during processing.
3291 // Note 2: MergeMem may already contain instance memory slices added
3292 // during the find_inst_mem() call when memory nodes were processed above.
|
143 add_node_to_connection_graph(n, &delayed_worklist);
144 PointsToNode* ptn = ptnode_adr(n->_idx);
145 if (ptn != NULL && ptn != phantom_obj) {
146 ptnodes_worklist.append(ptn);
147 if (ptn->is_JavaObject()) {
148 java_objects_worklist.append(ptn->as_JavaObject());
149 if ((n->is_Allocate() || n->is_CallStaticJava()) &&
150 (ptn->escape_state() < PointsToNode::GlobalEscape)) {
151 // Only allocations and java static calls results are interesting.
152 non_escaped_worklist.append(ptn->as_JavaObject());
153 }
154 } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
155 oop_fields_worklist.append(ptn->as_Field());
156 }
157 }
158 if (n->is_MergeMem()) {
159 // Collect all MergeMem nodes to add memory slices for
160 // scalar replaceable objects in split_unique_types().
161 _mergemem_worklist.append(n->as_MergeMem());
162 } else if (OptimizePtrCompare && n->is_Cmp() &&
163 (n->Opcode() == Opcodes::Op_CmpP || n->Opcode() == Opcodes::Op_CmpN)) {
164 // Collect compare pointers nodes.
165 ptr_cmp_worklist.append(n);
166 } else if (n->is_MemBarStoreStore()) {
167 // Collect all MemBarStoreStore nodes so that depending on the
168 // escape status of the associated Allocate node some of them
169 // may be eliminated.
170 storestore_worklist.append(n);
171 } else if (n->is_MemBar() && (n->Opcode() == Opcodes::Op_MemBarRelease) &&
172 (n->req() > MemBarNode::Precedent)) {
173 record_for_optimizer(n);
174 #ifdef ASSERT
175 } else if (n->is_AddP()) {
176 // Collect address nodes for graph verification.
177 addp_worklist.append(n);
178 #endif
179 } else if (n->is_ArrayCopy()) {
180 // Keep a list of ArrayCopy nodes so if one of its input is non
181 // escaping, we can record a unique type
182 arraycopy_worklist.append(n->as_ArrayCopy());
183 }
184 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
185 Node* m = n->fast_out(i); // Get user
186 ideal_nodes.push(m);
187 }
188 }
189 if (non_escaped_worklist.length() == 0) {
190 _collecting = false;
191 return false; // Nothing to do.
362 if (name != NULL && strcmp(name, "uncommon_trap") == 0)
363 return; // Skip uncommon traps
364 }
365 // Don't mark as processed since call's arguments have to be processed.
366 delayed_worklist->push(n);
367 // Check if a call returns an object.
368 if ((n->as_Call()->returns_pointer() &&
369 n->as_Call()->proj_out(TypeFunc::Parms) != NULL) ||
370 (n->is_CallStaticJava() &&
371 n->as_CallStaticJava()->is_boxing_method())) {
372 add_call_node(n->as_Call());
373 }
374 }
375 return;
376 }
377 // Put this check here to process call arguments since some call nodes
378 // point to phantom_obj.
379 if (n_ptn == phantom_obj || n_ptn == null_obj)
380 return; // Skip predefined nodes.
381
382 Opcodes opcode = n->Opcode();
383 switch (opcode) {
384 case Opcodes::Op_AddP: {
385 Node* base = get_addp_base(n);
386 PointsToNode* ptn_base = ptnode_adr(base->_idx);
387 // Field nodes are created for all field types. They are used in
388 // adjust_scalar_replaceable_state() and split_unique_types().
389 // Note, non-oop fields will have only base edges in Connection
390 // Graph because such fields are not used for oop loads and stores.
391 int offset = address_offset(n, igvn);
392 add_field(n, PointsToNode::NoEscape, offset);
393 if (ptn_base == NULL) {
394 delayed_worklist->push(n); // Process it later.
395 } else {
396 n_ptn = ptnode_adr(n_idx);
397 add_base(n_ptn->as_Field(), ptn_base);
398 }
399 break;
400 }
401 case Opcodes::Op_CastX2P: {
402 map_ideal_node(n, phantom_obj);
403 break;
404 }
405 case Opcodes::Op_CastPP:
406 case Opcodes::Op_CheckCastPP:
407 case Opcodes::Op_EncodeP:
408 case Opcodes::Op_DecodeN:
409 case Opcodes::Op_EncodePKlass:
410 case Opcodes::Op_DecodeNKlass: {
411 add_local_var_and_edge(n, PointsToNode::NoEscape,
412 n->in(1), delayed_worklist);
413 break;
414 }
415 case Opcodes::Op_CMoveP: {
416 add_local_var(n, PointsToNode::NoEscape);
417 // Do not add edges during first iteration because some could be
418 // not defined yet.
419 delayed_worklist->push(n);
420 break;
421 }
422 case Opcodes::Op_ConP:
423 case Opcodes::Op_ConN:
424 case Opcodes::Op_ConNKlass: {
425 // assume all oop constants globally escape except for null
426 PointsToNode::EscapeState es;
427 const Type* t = igvn->type(n);
428 if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
429 es = PointsToNode::NoEscape;
430 } else {
431 es = PointsToNode::GlobalEscape;
432 }
433 add_java_object(n, es);
434 break;
435 }
436 case Opcodes::Op_CreateEx: {
437 // assume that all exception objects globally escape
438 map_ideal_node(n, phantom_obj);
439 break;
440 }
441 case Opcodes::Op_LoadKlass:
442 case Opcodes::Op_LoadNKlass: {
443 // Unknown class is loaded
444 map_ideal_node(n, phantom_obj);
445 break;
446 }
447 case Opcodes::Op_LoadP:
448 case Opcodes::Op_LoadN:
449 case Opcodes::Op_LoadPLocked: {
450 add_objload_to_connection_graph(n, delayed_worklist);
451 break;
452 }
453 case Opcodes::Op_Parm: {
454 map_ideal_node(n, phantom_obj);
455 break;
456 }
457 case Opcodes::Op_PartialSubtypeCheck: {
458 // Produces Null or notNull and is used in only in CmpP so
459 // phantom_obj could be used.
460 map_ideal_node(n, phantom_obj); // Result is unknown
461 break;
462 }
463 case Opcodes::Op_Phi: {
464 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
465 // ThreadLocal has RawPtr type.
466 const Type* t = n->as_Phi()->type();
467 if (t->make_ptr() != NULL) {
468 add_local_var(n, PointsToNode::NoEscape);
469 // Do not add edges during first iteration because some could be
470 // not defined yet.
471 delayed_worklist->push(n);
472 }
473 break;
474 }
475 case Opcodes::Op_Proj: {
476 // we are only interested in the oop result projection from a call
477 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
478 n->in(0)->as_Call()->returns_pointer()) {
479 add_local_var_and_edge(n, PointsToNode::NoEscape,
480 n->in(0), delayed_worklist);
481 }
482 break;
483 }
484 case Opcodes::Op_Rethrow: // Exception object escapes
485 case Opcodes::Op_Return: {
486 if (n->req() > TypeFunc::Parms &&
487 igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
488 // Treat Return value as LocalVar with GlobalEscape escape state.
489 add_local_var_and_edge(n, PointsToNode::GlobalEscape,
490 n->in(TypeFunc::Parms), delayed_worklist);
491 }
492 break;
493 }
494 case Opcodes::Op_CompareAndExchangeP:
495 case Opcodes::Op_CompareAndExchangeN:
496 case Opcodes::Op_GetAndSetP:
497 case Opcodes::Op_GetAndSetN: {
498 add_objload_to_connection_graph(n, delayed_worklist);
499 // fallthrough
500 }
501 case Opcodes::Op_StoreP:
502 case Opcodes::Op_StoreN:
503 case Opcodes::Op_StoreNKlass:
504 case Opcodes::Op_StorePConditional:
505 case Opcodes::Op_WeakCompareAndSwapP:
506 case Opcodes::Op_WeakCompareAndSwapN:
507 case Opcodes::Op_CompareAndSwapP:
508 case Opcodes::Op_CompareAndSwapN: {
509 Node* adr = n->in(MemNode::Address);
510 const Type *adr_type = igvn->type(adr);
511 adr_type = adr_type->make_ptr();
512 if (adr_type == NULL) {
513 break; // skip dead nodes
514 }
515 if (adr_type->isa_oopptr() ||
516 (opcode == Opcodes::Op_StoreP || opcode == Opcodes::Op_StoreN || opcode == Opcodes::Op_StoreNKlass) &&
517 (adr_type == TypeRawPtr::NOTNULL &&
518 adr->in(AddPNode::Address)->is_Proj() &&
519 adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
520 delayed_worklist->push(n); // Process it later.
521 #ifdef ASSERT
522 assert(adr->is_AddP(), "expecting an AddP");
523 if (adr_type == TypeRawPtr::NOTNULL) {
524 // Verify a raw address for a store captured by Initialize node.
525 int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
526 assert(offs != Type::OffsetBot, "offset must be a constant");
527 }
528 #endif
529 } else {
530 // Ignore copy the displaced header to the BoxNode (OSR compilation).
531 if (adr->is_BoxLock())
532 break;
533 // Stored value escapes in unsafe access.
534 if ((opcode == Opcodes::Op_StoreP) && adr_type->isa_rawptr()) {
535 // Pointer stores in G1 barriers looks like unsafe access.
536 // Ignore such stores to be able scalar replace non-escaping
537 // allocations.
538 if (UseG1GC && adr->is_AddP()) {
539 Node* base = get_addp_base(adr);
540 if (base->Opcode() == Opcodes::Op_LoadP &&
541 base->in(MemNode::Address)->is_AddP()) {
542 adr = base->in(MemNode::Address);
543 Node* tls = get_addp_base(adr);
544 if (tls->Opcode() == Opcodes::Op_ThreadLocal) {
545 int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
546 if (offs == in_bytes(JavaThread::satb_mark_queue_offset() +
547 SATBMarkQueue::byte_offset_of_buf())) {
548 break; // G1 pre barrier previous oop value store.
549 }
550 if (offs == in_bytes(JavaThread::dirty_card_queue_offset() +
551 DirtyCardQueue::byte_offset_of_buf())) {
552 break; // G1 post barrier card address store.
553 }
554 }
555 }
556 }
557 delayed_worklist->push(n); // Process unsafe access later.
558 break;
559 }
560 #ifdef ASSERT
561 n->dump(1);
562 assert(false, "not unsafe or G1 barrier raw StoreP");
563 #endif
564 }
565 break;
566 }
567 case Opcodes::Op_AryEq:
568 case Opcodes::Op_HasNegatives:
569 case Opcodes::Op_StrComp:
570 case Opcodes::Op_StrEquals:
571 case Opcodes::Op_StrIndexOf:
572 case Opcodes::Op_StrIndexOfChar:
573 case Opcodes::Op_StrInflatedCopy:
574 case Opcodes::Op_StrCompressedCopy:
575 case Opcodes::Op_EncodeISOArray: {
576 add_local_var(n, PointsToNode::ArgEscape);
577 delayed_worklist->push(n); // Process it later.
578 break;
579 }
580 case Opcodes::Op_ThreadLocal: {
581 add_java_object(n, PointsToNode::ArgEscape);
582 break;
583 }
584 default:
585 ; // Do nothing for nodes not related to EA.
586 }
587 return;
588 }
589
590 #ifdef ASSERT
591 #define ELSE_FAIL(name) \
592 /* Should not be called for not pointer type. */ \
593 n->dump(1); \
594 assert(false, name); \
595 break;
596 #else
597 #define ELSE_FAIL(name) \
598 break;
599 #endif
600
601 // Add final simple edges to graph.
602 void ConnectionGraph::add_final_edges(Node *n) {
603 PointsToNode* n_ptn = ptnode_adr(n->_idx);
604 #ifdef ASSERT
605 if (_verify && n_ptn->is_JavaObject())
606 return; // This method does not change graph for JavaObject.
607 #endif
608
609 if (n->is_Call()) {
610 process_call_arguments(n->as_Call());
611 return;
612 }
613 assert(n->is_Store() || n->is_LoadStore() ||
614 (n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
615 "node should be registered already");
616 Opcodes opcode = n->Opcode();
617 switch (opcode) {
618 case Opcodes::Op_AddP: {
619 Node* base = get_addp_base(n);
620 PointsToNode* ptn_base = ptnode_adr(base->_idx);
621 assert(ptn_base != NULL, "field's base should be registered");
622 add_base(n_ptn->as_Field(), ptn_base);
623 break;
624 }
625 case Opcodes::Op_CastPP:
626 case Opcodes::Op_CheckCastPP:
627 case Opcodes::Op_EncodeP:
628 case Opcodes::Op_DecodeN:
629 case Opcodes::Op_EncodePKlass:
630 case Opcodes::Op_DecodeNKlass: {
631 add_local_var_and_edge(n, PointsToNode::NoEscape,
632 n->in(1), NULL);
633 break;
634 }
635 case Opcodes::Op_CMoveP: {
636 for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
637 Node* in = n->in(i);
638 if (in == NULL)
639 continue; // ignore NULL
640 Node* uncast_in = in->uncast();
641 if (uncast_in->is_top() || uncast_in == n)
642 continue; // ignore top or inputs which go back this node
643 PointsToNode* ptn = ptnode_adr(in->_idx);
644 assert(ptn != NULL, "node should be registered");
645 add_edge(n_ptn, ptn);
646 }
647 break;
648 }
649 case Opcodes::Op_LoadP:
650 case Opcodes::Op_LoadN:
651 case Opcodes::Op_LoadPLocked: {
652 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
653 // ThreadLocal has RawPtr type.
654 const Type* t = _igvn->type(n);
655 if (t->make_ptr() != NULL) {
656 Node* adr = n->in(MemNode::Address);
657 add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
658 break;
659 }
660 ELSE_FAIL("Op_LoadP");
661 }
662 case Opcodes::Op_Phi: {
663 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
664 // ThreadLocal has RawPtr type.
665 const Type* t = n->as_Phi()->type();
666 if (t->make_ptr() != NULL) {
667 for (uint i = 1; i < n->req(); i++) {
668 Node* in = n->in(i);
669 if (in == NULL)
670 continue; // ignore NULL
671 Node* uncast_in = in->uncast();
672 if (uncast_in->is_top() || uncast_in == n)
673 continue; // ignore top or inputs which go back this node
674 PointsToNode* ptn = ptnode_adr(in->_idx);
675 assert(ptn != NULL, "node should be registered");
676 add_edge(n_ptn, ptn);
677 }
678 break;
679 }
680 ELSE_FAIL("Op_Phi");
681 }
682 case Opcodes::Op_Proj: {
683 // we are only interested in the oop result projection from a call
684 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
685 n->in(0)->as_Call()->returns_pointer()) {
686 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
687 break;
688 }
689 ELSE_FAIL("Op_Proj");
690 }
691 case Opcodes::Op_Rethrow: // Exception object escapes
692 case Opcodes::Op_Return: {
693 if (n->req() > TypeFunc::Parms &&
694 _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
695 // Treat Return value as LocalVar with GlobalEscape escape state.
696 add_local_var_and_edge(n, PointsToNode::GlobalEscape,
697 n->in(TypeFunc::Parms), NULL);
698 break;
699 }
700 ELSE_FAIL("Op_Return");
701 }
702 case Opcodes::Op_StoreP:
703 case Opcodes::Op_StoreN:
704 case Opcodes::Op_StoreNKlass:
705 case Opcodes::Op_StorePConditional:
706 case Opcodes::Op_CompareAndExchangeP:
707 case Opcodes::Op_CompareAndExchangeN:
708 case Opcodes::Op_CompareAndSwapP:
709 case Opcodes::Op_CompareAndSwapN:
710 case Opcodes::Op_WeakCompareAndSwapP:
711 case Opcodes::Op_WeakCompareAndSwapN:
712 case Opcodes::Op_GetAndSetP:
713 case Opcodes::Op_GetAndSetN: {
714 Node* adr = n->in(MemNode::Address);
715 const Type *adr_type = _igvn->type(adr);
716 adr_type = adr_type->make_ptr();
717 #ifdef ASSERT
718 if (adr_type == NULL) {
719 n->dump(1);
720 assert(adr_type != NULL, "dead node should not be on list");
721 break;
722 }
723 #endif
724 if (opcode == Opcodes::Op_GetAndSetP || opcode == Opcodes::Op_GetAndSetN ||
725 opcode == Opcodes::Op_CompareAndExchangeN || opcode == Opcodes::Op_CompareAndExchangeP) {
726 add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
727 }
728 if (adr_type->isa_oopptr() ||
729 (opcode == Opcodes::Op_StoreP || opcode == Opcodes::Op_StoreN || opcode == Opcodes::Op_StoreNKlass) &&
730 (adr_type == TypeRawPtr::NOTNULL &&
731 adr->in(AddPNode::Address)->is_Proj() &&
732 adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
733 // Point Address to Value
734 PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
735 assert(adr_ptn != NULL &&
736 adr_ptn->as_Field()->is_oop(), "node should be registered");
737 Node *val = n->in(MemNode::ValueIn);
738 PointsToNode* ptn = ptnode_adr(val->_idx);
739 assert(ptn != NULL, "node should be registered");
740 add_edge(adr_ptn, ptn);
741 break;
742 } else if ((opcode == Opcodes::Op_StoreP) && adr_type->isa_rawptr()) {
743 // Stored value escapes in unsafe access.
744 Node *val = n->in(MemNode::ValueIn);
745 PointsToNode* ptn = ptnode_adr(val->_idx);
746 assert(ptn != NULL, "node should be registered");
747 set_escape_state(ptn, PointsToNode::GlobalEscape);
748 // Add edge to object for unsafe access with offset.
749 PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
750 assert(adr_ptn != NULL, "node should be registered");
751 if (adr_ptn->is_Field()) {
752 assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
753 add_edge(adr_ptn, ptn);
754 }
755 break;
756 }
757 ELSE_FAIL("Op_StoreP");
758 }
759 case Opcodes::Op_AryEq:
760 case Opcodes::Op_HasNegatives:
761 case Opcodes::Op_StrComp:
762 case Opcodes::Op_StrEquals:
763 case Opcodes::Op_StrIndexOf:
764 case Opcodes::Op_StrIndexOfChar:
765 case Opcodes::Op_StrInflatedCopy:
766 case Opcodes::Op_StrCompressedCopy:
767 case Opcodes::Op_EncodeISOArray: {
768 // char[]/byte[] arrays passed to string intrinsic do not escape but
769 // they are not scalar replaceable. Adjust escape state for them.
770 // Start from in(2) edge since in(1) is memory edge.
771 for (uint i = 2; i < n->req(); i++) {
772 Node* adr = n->in(i);
773 const Type* at = _igvn->type(adr);
774 if (!adr->is_top() && at->isa_ptr()) {
775 assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
776 at->isa_ptr() != NULL, "expecting a pointer");
777 if (adr->is_AddP()) {
778 adr = get_addp_base(adr);
779 }
780 PointsToNode* ptn = ptnode_adr(adr->_idx);
781 assert(ptn != NULL, "node should be registered");
782 add_edge(n_ptn, ptn);
783 }
784 }
785 break;
786 }
787 default: {
887 const TypeTuple* d = call->tf()->domain();
888 bool ret_arg = false;
889 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
890 if (d->field_at(i)->isa_ptr() != NULL &&
891 call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
892 ret_arg = true;
893 break;
894 }
895 }
896 if (ret_arg) {
897 add_local_var(call, PointsToNode::ArgEscape);
898 } else {
899 // Returns unknown object.
900 map_ideal_node(call, phantom_obj);
901 }
902 }
903 }
904 } else {
905 // An other type of call, assume the worst case:
906 // returned value is unknown and globally escapes.
907 assert(call->Opcode() == Opcodes::Op_CallDynamicJava, "add failed case check");
908 map_ideal_node(call, phantom_obj);
909 }
910 }
911
912 void ConnectionGraph::process_call_arguments(CallNode *call) {
913 bool is_arraycopy = false;
914 switch (call->Opcode()) {
915 #ifdef ASSERT
916 case Opcodes::Op_Allocate:
917 case Opcodes::Op_AllocateArray:
918 case Opcodes::Op_Lock:
919 case Opcodes::Op_Unlock:
920 assert(false, "should be done already");
921 break;
922 #endif
923 case Opcodes::Op_ArrayCopy:
924 case Opcodes::Op_CallLeafNoFP:
925 // Most array copies are ArrayCopy nodes at this point but there
926 // are still a few direct calls to the copy subroutines (See
927 // PhaseStringOpts::copy_string())
928 is_arraycopy = (call->Opcode() == Opcodes::Op_ArrayCopy) ||
929 call->as_CallLeaf()->is_call_to_arraycopystub();
930 // fall through
931 case Opcodes::Op_CallLeaf: {
932 // Stub calls: objects do not escape but they are not scalar replaceable.
933 // Adjust escape state for outgoing arguments.
934 const TypeTuple* d = call->tf()->domain();
935 bool src_has_oops = false;
936 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
937 const Type* at = d->field_at(i);
938 Node *arg = call->in(i);
939 if (arg == NULL) {
940 continue;
941 }
942 const Type *aat = _igvn->type(arg);
943 if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr())
944 continue;
945 if (arg->is_AddP()) {
946 //
947 // The inline_native_clone() case when the arraycopy stub is called
948 // after the allocation before Initialize and CheckCastPP nodes.
949 // Or normal arraycopy for object arrays case.
950 //
951 // Set AddP's base (Allocate) as not scalar replaceable since
1027 if (arg_is_arraycopy_dest) {
1028 Node* src = call->in(TypeFunc::Parms);
1029 if (src->is_AddP()) {
1030 src = get_addp_base(src);
1031 }
1032 PointsToNode* src_ptn = ptnode_adr(src->_idx);
1033 assert(src_ptn != NULL, "should be registered");
1034 if (arg_ptn != src_ptn) {
1035 // Special arraycopy edge:
1036 // A destination object's field can't have the source object
1037 // as base since the objects' escape states are not related.
1038 // Only escape state of destination object's fields affects
1039 // escape state of fields in source object.
1040 add_arraycopy(call, es, src_ptn, arg_ptn);
1041 }
1042 }
1043 }
1044 }
1045 break;
1046 }
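// Sketch of the special arraycopy edge added in the case above: for
//   System.arraycopy(src, 0, dst, 0, len);
// an arraycopy edge from src_ptn to the destination records that the
// destination's elements may point to whatever the source's elements
// point to, without tying the two objects' escape states together.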
1047 case Opcodes::Op_CallStaticJava: {
1048 // For a static call, we know exactly what method is being called.
1049 // Use bytecode estimator to record the call's escape effects
1050 #ifdef ASSERT
1051 const char* name = call->as_CallStaticJava()->_name;
1052 assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
1053 #endif
1054 ciMethod* meth = call->as_CallJava()->method();
1055 if ((meth != NULL) && meth->is_boxing_method()) {
1056 break; // Boxing methods do not modify any oops.
1057 }
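// e.g. Integer.valueOf(int): boxing takes a primitive and only
// allocates the box, so there are no oop arguments to process.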
1058 BCEscapeAnalyzer* call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
1059 // fall-through if not a Java method or no analyzer information
1060 if (call_analyzer != NULL) {
1061 PointsToNode* call_ptn = ptnode_adr(call->_idx);
1062 const TypeTuple* d = call->tf()->domain();
1063 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1064 const Type* at = d->field_at(i);
1065 int k = i - TypeFunc::Parms;
1066 Node* arg = call->in(i);
1067 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1855 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
1856 #endif
1857 alock->set_non_esc_obj();
1858 }
1859 }
1860 }
1861 }
1862 }
1863
1864 if (OptimizePtrCompare) {
1865 // Add ConI(#CC_GT) and ConI(#CC_EQ).
1866 _pcmp_neq = igvn->makecon(TypeInt::CC_GT);
1867 _pcmp_eq = igvn->makecon(TypeInt::CC_EQ);
1868 // Optimize object pointer compares.
1869 while (ptr_cmp_worklist.length() != 0) {
1870 Node *n = ptr_cmp_worklist.pop();
1871 Node *res = optimize_ptr_compare(n);
1872 if (res != NULL) {
1873 #ifndef PRODUCT
1874 if (PrintOptimizePtrCompare) {
1875 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Opcodes::Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ"));
1876 if (Verbose) {
1877 n->dump(1);
1878 }
1879 }
1880 #endif
1881 igvn->replace_node(n, res);
1882 }
1883 }
1884 // cleanup
1885 if (_pcmp_neq->outcnt() == 0)
1886 igvn->hash_delete(_pcmp_neq);
1887 if (_pcmp_eq->outcnt() == 0)
1888 igvn->hash_delete(_pcmp_eq);
1889 }
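// Editor's sketch of a compare folded by the loop above:
//   Object a = new Object(); // unique, non-escaping allocation
//   if (a == b) { ... }      // 'b' cannot point to 'a', so CmpP
//                            // folds to _pcmp_neq (ConI #CC_GT)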
1890
1891 // For MemBarStoreStore nodes added in library_call.cpp, check
1892 // escape status of associated AllocateNode and optimize out
1893 // MemBarStoreStore node if the allocated object never escapes.
1894 while (storestore_worklist.length() != 0) {
1895 Node *n = storestore_worklist.pop();
1896 MemBarStoreStoreNode *storestore = n->as_MemBarStoreStore();
1897 Node *alloc = storestore->in(MemBarNode::Precedent)->in(0);
1898 assert(alloc->is_Allocate(), "storestore should point to AllocateNode");
1899 if (not_global_escape(alloc)) {
1900 MemBarNode* mb = MemBarNode::make(C, Opcodes::Op_MemBarCPUOrder, Compile::AliasIdxBot);
1901 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
1902 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
1903 igvn->register_new_node_with_optimizer(mb);
1904 igvn->replace_node(storestore, mb);
1905 }
1906 }
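// Sketch of the rewrite performed by the loop above: the
// MemBarStoreStore that orders a non-escaping allocation's field
// initialization before publication is not needed (no other thread
// can observe the object), so it is replaced by a MemBarCPUOrder
// that only constrains compiler scheduling.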
1907 }
1908
1909 // Optimize object pointer compares.
1910 Node* ConnectionGraph::optimize_ptr_compare(Node* n) {
1911 assert(OptimizePtrCompare, "sanity");
1912 PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
1913 PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
1914 JavaObjectNode* jobj1 = unique_java_object(n->in(1));
1915 JavaObjectNode* jobj2 = unique_java_object(n->in(2));
1916 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
1917 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
1918
1919 // Check simple cases first.
1920 if (jobj1 != NULL) {
2050 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
2051 const Type* adr_type = n->as_AddP()->bottom_type();
2052 BasicType bt = T_INT;
2053 if (offset == Type::OffsetBot) {
2054 // Check only oop fields.
2055 if (!adr_type->isa_aryptr() ||
2056 (adr_type->isa_aryptr()->klass() == NULL) ||
2057 adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
2058 // OffsetBot is used to reference array's element. Ignore first AddP.
2059 if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
2060 bt = T_OBJECT;
2061 }
2062 }
2063 } else if (offset != oopDesc::klass_offset_in_bytes()) {
2064 if (adr_type->isa_instptr()) {
2065 ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
2066 if (field != NULL) {
2067 bt = field->layout_type();
2068 } else {
2069 // Check for unsafe oop field access
2070 if (n->has_out_with(Opcodes::Op_StoreP, Opcodes::Op_LoadP, Opcodes::Op_StoreN, Opcodes::Op_LoadN)) {
2071 bt = T_OBJECT;
2072 (*unsafe) = true;
2073 }
2074 }
2075 } else if (adr_type->isa_aryptr()) {
2076 if (offset == arrayOopDesc::length_offset_in_bytes()) {
2077 // Ignore array length load.
2078 } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
2079 // Ignore first AddP.
2080 } else {
2081 const Type* elemtype = adr_type->isa_aryptr()->elem();
2082 bt = elemtype->array_element_basic_type();
2083 }
2084 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2085 // Allocation initialization, ThreadLocal field access, unsafe access
2086 if (n->has_out_with(Opcodes::Op_StoreP, Opcodes::Op_LoadP, Opcodes::Op_StoreN, Opcodes::Op_LoadN)) {
2087 bt = T_OBJECT;
2088 }
2089 }
2090 }
2091 return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);
2092 }
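// Example classifications for is_oop_field() (an editor's sketch):
//   instance field declared "Object f"   -> bt = T_OBJECT, true
//   instance field declared "int i"      -> bt = T_INT,    false
//   Object[] element (offset OffsetBot)  -> bt = T_OBJECT, true
//   unsafe oop access at a raw address   -> bt = T_OBJECT, *unsafe set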
2093
2094 // Returns the unique pointed-to java object or NULL.
2095 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
2096 assert(!_collecting, "should not be called on a constructed graph");
2097 // If the node was created after the escape computation we can't answer.
2098 uint idx = n->_idx;
2099 if (idx >= nodes_size()) {
2100 return NULL;
2101 }
2102 PointsToNode* ptn = ptnode_adr(idx);
2103 if (ptn->is_JavaObject()) {
2104 return ptn->as_JavaObject();
2105 }
2106 assert(ptn->is_LocalVar(), "sanity");
2290 // LoadKlass
2291 // | |
2292 // AddP ( base == address )
2293 //
2294 // case #8. narrow Klass's field reference.
2295 // LoadNKlass
2296 // |
2297 // DecodeN
2298 // | |
2299 // AddP ( base == address )
2300 //
2301 Node *base = addp->in(AddPNode::Base);
2302 if (base->uncast()->is_top()) { // The AddP cases #3 and #6.
2303 base = addp->in(AddPNode::Address);
2304 while (base->is_AddP()) {
2305 // Case #6 (unsafe access) may have several chained AddP nodes.
2306 assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
2307 base = base->in(AddPNode::Address);
2308 }
2309 Node* uncast_base = base->uncast();
2310 Opcodes opcode = uncast_base->Opcode();
2311 assert(opcode == Opcodes::Op_ConP || opcode == Opcodes::Op_ThreadLocal ||
2312 opcode == Opcodes::Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
2313 (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
2314 (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
2315 }
2316 return base;
2317 }
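// Typical shape handled by get_addp_base() (a sketch):
//
//   CheckCastPP            ConX #offset
//        \                /
//         AddP [Base, Address, Offset]  -- returns the CheckCastPP
//
// For unsafe accesses (Base is top) the Address chain is followed
// instead, possibly through several chained AddP nodes.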
2318
2319 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
2320 assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
2321 Node* addp2 = addp->raw_out(0);
2322 if (addp->outcnt() == 1 && addp2->is_AddP() &&
2323 addp2->in(AddPNode::Base) == n &&
2324 addp2->in(AddPNode::Address) == addp) {
2325 assert(addp->in(AddPNode::Base) == n, "expecting the same base");
2326 //
2327 // Find the array's offset to push it on the worklist first, so
2328 // that the array element's offset is processed first (pushed
2329 // second) and no CastPP is inserted for the array's offset.
2330 // Otherwise the inserted CastPP (LocalVar) would point to what
2331 // the AddP (Field) points to, which would be wrong since
2332 // the algorithm expects the CastPP has the same point as
2604 // Don't move related membars.
2605 record_for_optimizer(use);
2606 continue;
2607 }
2608 tp = use->as_MemBar()->adr_type()->isa_ptr();
2609 if ((tp != NULL && C->get_alias_index(tp) == alias_idx) ||
2610 alias_idx == general_idx) {
2611 continue; // Nothing to do
2612 }
2613 // Move to general memory slice.
2614 uint orig_uniq = C->unique();
2615 Node* m = find_inst_mem(n, general_idx, orig_phis);
2616 assert(orig_uniq == C->unique(), "no new nodes");
2617 igvn->hash_delete(use);
2618 imax -= use->replace_edge(n, m);
2619 igvn->hash_insert(use);
2620 record_for_optimizer(use);
2621 --i;
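// Editor's note: replace_edge() returns the number of out-edges that
// were replaced; 'imax' shrinks by that amount and 'i' is decremented
// so the DUIterator_Fast stays valid over the mutated use list.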
2622 #ifdef ASSERT
2623 } else if (use->is_Mem()) {
2624 if (use->Opcode() == Opcodes::Op_StoreCM && use->in(MemNode::OopStore) == n) {
2625 // Don't move related cardmark.
2626 continue;
2627 }
2628 // Memory nodes should have new memory input.
2629 tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
2630 assert(tp != NULL, "ptr type");
2631 int idx = C->get_alias_index(tp);
2632 assert(get_map(use->_idx) != NULL || idx == alias_idx,
2633 "Following memory nodes should have new memory input or be on the same memory slice");
2634 } else if (use->is_Phi()) {
2635 // Phi nodes should be split and moved already.
2636 tp = use->as_Phi()->adr_type()->isa_ptr();
2637 assert(tp != NULL, "ptr type");
2638 int idx = C->get_alias_index(tp);
2639 assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
2640 } else {
2641 use->dump();
2642 assert(false, "should not be here");
2643 #endif
2644 }
2720 return NULL;
2721 }
2722 mmem->set_memory_at(alias_idx, result);
2723 }
2724 } else if (result->is_Phi() &&
2725 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
2726 Node *un = result->as_Phi()->unique_input(igvn);
2727 if (un != NULL) {
2728 orig_phis.append_if_missing(result->as_Phi());
2729 result = un;
2730 } else {
2731 break;
2732 }
2733 } else if (result->is_ClearArray()) {
2734 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
2735 // Cannot bypass initialization of the instance
2736 // we are looking for.
2737 break;
2738 }
2739 // Otherwise skip it (the call updated 'result' value).
2740 } else if (result->Opcode() == Opcodes::Op_SCMemProj) {
2741 Node* mem = result->in(0);
2742 Node* adr = NULL;
2743 if (mem->is_LoadStore()) {
2744 adr = mem->in(MemNode::Address);
2745 } else {
2746 assert(mem->Opcode() == Opcodes::Op_EncodeISOArray ||
2747 mem->Opcode() == Opcodes::Op_StrCompressedCopy, "sanity");
2748 adr = mem->in(3); // Memory edge corresponds to destination array
2749 }
2750 const Type *at = igvn->type(adr);
2751 if (at != Type::TOP) {
2752 assert(at->isa_ptr() != NULL, "pointer type required.");
2753 int idx = C->get_alias_index(at->is_ptr());
2754 if (idx == alias_idx) {
2755 // Assert in debug mode
2756 assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
2757 break; // In product mode return SCMemProj node
2758 }
2759 }
2760 result = mem->in(MemNode::Memory);
2761 } else if (result->Opcode() == Opcodes::Op_StrInflatedCopy) {
2762 Node* adr = result->in(3); // Memory edge corresponds to destination array
2763 const Type *at = igvn->type(adr);
2764 if (at != Type::TOP) {
2765 assert(at->isa_ptr() != NULL, "pointer type required.");
2766 int idx = C->get_alias_index(at->is_ptr());
2767 if (idx == alias_idx) {
2768 // Assert in debug mode
2769 assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
2770 break; // In product mode return SCMemProj node
2771 }
2772 }
2773 result = result->in(MemNode::Memory);
2774 }
2775 }
2776 if (result->is_Phi()) {
2777 PhiNode *mphi = result->as_Phi();
2778 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
2779 const TypePtr *t = mphi->adr_type();
2780 if (!is_instance) {
2781 // Push all non-instance Phis on the orig_phis worklist to update inputs
3026 }
3027 }
3028 }
3029 } else if (n->is_AddP()) {
3030 JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
3031 if (jobj == NULL || jobj == phantom_obj) {
3032 #ifdef ASSERT
3033 ptnode_adr(get_addp_base(n)->_idx)->dump();
3034 ptnode_adr(n->_idx)->dump();
3035 assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
3036 #endif
3037 _compile->record_failure(C2Compiler::retry_no_escape_analysis());
3038 return;
3039 }
3040 Node *base = get_map(jobj->idx()); // CheckCastPP node
3041 if (!split_AddP(n, base)) continue; // wrong type from dead path
3042 } else if (n->is_Phi() ||
3043 n->is_CheckCastPP() ||
3044 n->is_EncodeP() ||
3045 n->is_DecodeN() ||
3046 (n->is_ConstraintCast() && n->Opcode() == Opcodes::Op_CastPP)) {
3047 if (visited.test_set(n->_idx)) {
3048 assert(n->is_Phi(), "loops only through Phi's");
3049 continue; // already processed
3050 }
3051 JavaObjectNode* jobj = unique_java_object(n);
3052 if (jobj == NULL || jobj == phantom_obj) {
3053 #ifdef ASSERT
3054 ptnode_adr(n->_idx)->dump();
3055 assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
3056 #endif
3057 _compile->record_failure(C2Compiler::retry_no_escape_analysis());
3058 return;
3059 } else {
3060 Node *val = get_map(jobj->idx()); // CheckCastPP node
3061 TypeNode *tn = n->as_Type();
3062 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
3063 assert(tinst != NULL && tinst->is_known_instance() &&
3064 tinst->instance_id() == jobj->idx(), "instance type expected.");
3065
3066 const Type *tn_type = igvn->type(tn);
3096 // push allocation's users on appropriate worklist
3097 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3098 Node *use = n->fast_out(i);
3099 if (use->is_Mem() && use->in(MemNode::Address) == n) {
3100 // Load/store to instance's field
3101 memnode_worklist.append_if_missing(use);
3102 } else if (use->is_MemBar()) {
3103 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3104 memnode_worklist.append_if_missing(use);
3105 }
3106 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
3107 Node* addp2 = find_second_addp(use, n);
3108 if (addp2 != NULL) {
3109 alloc_worklist.append_if_missing(addp2);
3110 }
3111 alloc_worklist.append_if_missing(use);
3112 } else if (use->is_Phi() ||
3113 use->is_CheckCastPP() ||
3114 use->is_EncodeNarrowPtr() ||
3115 use->is_DecodeNarrowPtr() ||
3116 (use->is_ConstraintCast() && use->Opcode() == Opcodes::Op_CastPP)) {
3117 alloc_worklist.append_if_missing(use);
3118 #ifdef ASSERT
3119 } else if (use->is_Mem()) {
3120 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3121 } else if (use->is_MergeMem()) {
3122 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3123 } else if (use->is_SafePoint()) {
3124 // Look for MergeMem nodes for calls which reference unique allocation
3125 // (through CheckCastPP nodes) even for debug info.
3126 Node* m = use->in(TypeFunc::Memory);
3127 if (m->is_MergeMem()) {
3128 assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3129 }
3130 } else if (use->Opcode() == Opcodes::Op_EncodeISOArray) {
3131 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3132 // EncodeISOArray overwrites destination array
3133 memnode_worklist.append_if_missing(use);
3134 }
3135 } else {
3136 Opcodes op = use->Opcode();
3137 if ((use->in(MemNode::Memory) == n) &&
3138 (op == Opcodes::Op_StrCompressedCopy || op == Opcodes::Op_StrInflatedCopy)) {
3139 // They overwrite memory edge corresponding to destination array.
3140 memnode_worklist.append_if_missing(use);
3141 } else if (!(op == Opcodes::Op_CmpP || op == Opcodes::Op_Conv2B ||
3142 op == Opcodes::Op_CastP2X || op == Opcodes::Op_StoreCM ||
3143 op == Opcodes::Op_FastLock || op == Opcodes::Op_AryEq || op == Opcodes::Op_StrComp || op == Opcodes::Op_HasNegatives ||
3144 op == Opcodes::Op_StrCompressedCopy || op == Opcodes::Op_StrInflatedCopy ||
3145 op == Opcodes::Op_StrEquals || op == Opcodes::Op_StrIndexOf || op == Opcodes::Op_StrIndexOfChar)) {
3146 n->dump();
3147 use->dump();
3148 assert(false, "EA: missing allocation reference path");
3149 }
3150 #endif
3151 }
3152 }
3153
3154 }
3155
3156 // Go over all ArrayCopy nodes and if one of the inputs has a unique
3157 // type, record it in the ArrayCopy node so we know what memory this
3158 // node uses/modifies.
3159 for (int next = 0; next < arraycopy_worklist.length(); next++) {
3160 ArrayCopyNode* ac = arraycopy_worklist.at(next);
3161 Node* dest = ac->in(ArrayCopyNode::Dest);
3162 if (dest->is_AddP()) {
3163 dest = get_addp_base(dest);
3164 }
3165 JavaObjectNode* jobj = unique_java_object(dest);
3187 // New alias types were created in split_AddP().
3188 uint new_index_end = (uint) _compile->num_alias_types();
3189 assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");
3190
3191 // Phase 2: Process MemNode's from memnode_worklist. Compute new address type and
3192 // new values for Memory inputs (the Memory inputs are not
3193 // actually updated until Phase 4).
3194 if (memnode_worklist.length() == 0)
3195 return; // nothing to do
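// e.g. for a Load from a unique instance, find_inst_mem() walks the
// memory chain past stores to unrelated slices and returns the memory
// state belonging to this instance's alias index.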
3196 while (memnode_worklist.length() != 0) {
3197 Node *n = memnode_worklist.pop();
3198 if (visited.test_set(n->_idx))
3199 continue;
3200 if (n->is_Phi() || n->is_ClearArray()) {
3201 // we don't need to do anything, but the users must be pushed
3202 } else if (n->is_MemBar()) { // Initialize, MemBar nodes
3203 // we don't need to do anything, but the users must be pushed
3204 n = n->as_MemBar()->proj_out(TypeFunc::Memory);
3205 if (n == NULL)
3206 continue;
3207 } else if (n->Opcode() == Opcodes::Op_StrCompressedCopy ||
3208 n->Opcode() == Opcodes::Op_EncodeISOArray) {
3209 // get the memory projection
3210 n = n->find_out_with(Opcodes::Op_SCMemProj);
3211 assert(n->Opcode() == Opcodes::Op_SCMemProj, "memory projection required");
3212 } else {
3213 assert(n->is_Mem(), "memory node required.");
3214 Node *addr = n->in(MemNode::Address);
3215 const Type *addr_t = igvn->type(addr);
3216 if (addr_t == Type::TOP)
3217 continue;
3218 assert(addr_t->isa_ptr() != NULL, "pointer type required.");
3219 int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
3220 assert((uint)alias_idx < new_index_end, "wrong alias index");
3221 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
3222 if (_compile->failing()) {
3223 return;
3224 }
3225 if (mem != n->in(MemNode::Memory)) {
3226 // We delay the memory edge update since we need the old one in
3227 // the MergeMem code below when instance memory slices are separated.
3228 set_map(n, mem);
3229 }
3230 if (n->is_Load()) {
3231 continue; // don't push users
3232 } else if (n->is_LoadStore()) {
3233 // get the memory projection
3234 n = n->find_out_with(Opcodes::Op_SCMemProj);
3235 assert(n->Opcode() == Opcodes::Op_SCMemProj, "memory projection required");
3236 }
3237 }
3238 // push user on appropriate worklist
3239 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3240 Node *use = n->fast_out(i);
3241 if (use->is_Phi() || use->is_ClearArray()) {
3242 memnode_worklist.append_if_missing(use);
3243 } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
3244 if (use->Opcode() == Opcodes::Op_StoreCM) // Ignore cardmark stores
3245 continue;
3246 memnode_worklist.append_if_missing(use);
3247 } else if (use->is_MemBar()) {
3248 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3249 memnode_worklist.append_if_missing(use);
3250 }
3251 #ifdef ASSERT
3252 } else if (use->is_Mem()) {
3253 assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
3254 } else if (use->is_MergeMem()) {
3255 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3256 } else if (use->Opcode() == Opcodes::Op_EncodeISOArray) {
3257 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3258 // EncodeISOArray overwrites destination array
3259 memnode_worklist.append_if_missing(use);
3260 }
3261 } else {
3262 Opcodes op = use->Opcode();
3263 if ((use->in(MemNode::Memory) == n) &&
3264 (op == Opcodes::Op_StrCompressedCopy || op == Opcodes::Op_StrInflatedCopy)) {
3265 // They overwrite memory edge corresponding to destination array.
3266 memnode_worklist.append_if_missing(use);
3267 } else if (!(op == Opcodes::Op_StoreCM ||
3268 (op == Opcodes::Op_CallLeaf && use->as_CallLeaf()->_name != NULL &&
3269 strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) ||
3270 op == Opcodes::Op_AryEq || op == Opcodes::Op_StrComp || op == Opcodes::Op_HasNegatives ||
3271 op == Opcodes::Op_StrCompressedCopy || op == Opcodes::Op_StrInflatedCopy ||
3272 op == Opcodes::Op_StrEquals || op == Opcodes::Op_StrIndexOf || op == Opcodes::Op_StrIndexOfChar)) {
3273 n->dump();
3274 use->dump();
3275 assert(false, "EA: missing memory path");
3276 }
3277 #endif
3278 }
3279 }
3280 }
3281
3282 // Phase 3: Process MergeMem nodes from mergemem_worklist.
3283 // Walk each memory slice moving the first node encountered of each
3284 // instance type to the input corresponding to its alias index.
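// Sketch: in MergeMem { base_memory, ..., in(alias_idx), ... }, a
// memory node for a unique instance found on a wider slice is moved
// to in(alias_idx) for that instance, creating the slice if missing.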
3285 uint length = _mergemem_worklist.length();
3286 for (uint next = 0; next < length; ++next) {
3287 MergeMemNode* nmm = _mergemem_worklist.at(next);
3288 assert(!visited.test_set(nmm->_idx), "should not be visited before");
3289 // Note: we don't want to use MergeMemStream here because we only want to
3290 // scan inputs which exist at the start, not ones we add during processing.
3291 // Note 2: MergeMem may already contain instance memory slices added
3292 // during find_inst_mem() call when memory nodes were processed above.