src/share/vm/opto/escape.cpp

 353       // Put Lock and Unlock nodes on IGVN worklist to process them during
 354       // first IGVN optimization when escape information is still available.
 355       record_for_optimizer(n);
 356     } else if (n->is_Allocate()) {
 357       add_call_node(n->as_Call());
 358       record_for_optimizer(n);
 359     } else {
 360       if (n->is_CallStaticJava()) {
 361         const char* name = n->as_CallStaticJava()->_name;
 362         if (name != NULL && strcmp(name, "uncommon_trap") == 0)
 363           return; // Skip uncommon traps
 364       }
 365       // Don't mark as processed since call's arguments have to be processed.
 366       delayed_worklist->push(n);
 367       // Check if a call returns an object.
 368       if ((n->as_Call()->returns_pointer() &&
 369            n->as_Call()->proj_out(TypeFunc::Parms) != NULL) ||
 370           (n->is_CallStaticJava() &&
 371            n->as_CallStaticJava()->is_boxing_method())) {
 372         add_call_node(n->as_Call());
 373       }
 374     }
 375     return;
 376   }
 377   // Put this check here to process call arguments since some call nodes
 378   // point to phantom_obj.
 379   if (n_ptn == phantom_obj || n_ptn == null_obj)
 380     return; // Skip predefined nodes.
 381 
 382   int opcode = n->Opcode();
 383   switch (opcode) {
 384     case Op_AddP: {
 385       Node* base = get_addp_base(n);
 386       PointsToNode* ptn_base = ptnode_adr(base->_idx);
 387       // Field nodes are created for all field types. They are used in
 388       // adjust_scalar_replaceable_state() and split_unique_types().
 389       // Note, non-oop fields will have only base edges in Connection
 390       // Graph because such fields are not used for oop loads and stores.
 391       int offset = address_offset(n, igvn);
 392       add_field(n, PointsToNode::NoEscape, offset);


 457     case Op_PartialSubtypeCheck: {
 458       // Produces Null or notNull and is used only in CmpP so
 459       // phantom_obj could be used.
 460       map_ideal_node(n, phantom_obj); // Result is unknown
 461       break;
 462     }
 463     case Op_Phi: {
 464       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 465       // ThreadLocal has RawPtr type.
 466       const Type* t = n->as_Phi()->type();
 467       if (t->make_ptr() != NULL) {
 468         add_local_var(n, PointsToNode::NoEscape);
 469         // Do not add edges during the first iteration because some
 470         // inputs may not be defined yet.
 471         delayed_worklist->push(n);
 472       }
 473       break;
 474     }
 475     case Op_Proj: {
 476       // we are only interested in the oop result projection from a call
 477       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
 478           n->in(0)->as_Call()->returns_pointer()) {
 479         add_local_var_and_edge(n, PointsToNode::NoEscape,
 480                                n->in(0), delayed_worklist);
 481       }
 482       break;
 483     }
 484     case Op_Rethrow: // Exception object escapes
 485     case Op_Return: {
 486       if (n->req() > TypeFunc::Parms &&
 487           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
 488         // Treat Return value as LocalVar with GlobalEscape escape state.
 489         add_local_var_and_edge(n, PointsToNode::GlobalEscape,
 490                                n->in(TypeFunc::Parms), delayed_worklist);
 491       }
 492       break;
 493     }
 494     case Op_CompareAndExchangeP:
 495     case Op_CompareAndExchangeN:
 496     case Op_GetAndSetP:
 497     case Op_GetAndSetN: {
 498       add_objload_to_connection_graph(n, delayed_worklist);


 664       // ThreadLocal has RawPtr type.
 665       const Type* t = n->as_Phi()->type();
 666       if (t->make_ptr() != NULL) {
 667         for (uint i = 1; i < n->req(); i++) {
 668           Node* in = n->in(i);
 669           if (in == NULL)
 670             continue;  // ignore NULL
 671           Node* uncast_in = in->uncast();
 672           if (uncast_in->is_top() || uncast_in == n)
 673             continue;  // ignore top or inputs which go back to this node
 674           PointsToNode* ptn = ptnode_adr(in->_idx);
 675           assert(ptn != NULL, "node should be registered");
 676           add_edge(n_ptn, ptn);
 677         }
 678         break;
 679       }
 680       ELSE_FAIL("Op_Phi");
 681     }
 682     case Op_Proj: {
 683       // we are only interested in the oop result projection from a call
 684       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
 685           n->in(0)->as_Call()->returns_pointer()) {
 686         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
 687         break;
 688       }
 689       ELSE_FAIL("Op_Proj");
 690     }
 691     case Op_Rethrow: // Exception object escapes
 692     case Op_Return: {
 693       if (n->req() > TypeFunc::Parms &&
 694           _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
 695         // Treat Return value as LocalVar with GlobalEscape escape state.
 696         add_local_var_and_edge(n, PointsToNode::GlobalEscape,
 697                                n->in(TypeFunc::Parms), NULL);
 698         break;
 699       }
 700       ELSE_FAIL("Op_Return");
 701     }
 702     case Op_StoreP:
 703     case Op_StoreN:
 704     case Op_StoreNKlass:
 705     case Op_StorePConditional:


 780           PointsToNode* ptn = ptnode_adr(adr->_idx);
 781           assert(ptn != NULL, "node should be registered");
 782           add_edge(n_ptn, ptn);
 783         }
 784       }
 785       break;
 786     }
 787     default: {
 788       // This method should be called only for EA specific nodes which may
 789       // miss some edges when they were created.
 790 #ifdef ASSERT
 791       n->dump(1);
 792 #endif
 793       guarantee(false, "unknown node");
 794     }
 795   }
 796   return;
 797 }
 798 
 799 void ConnectionGraph::add_call_node(CallNode* call) {
 800   assert(call->returns_pointer(), "only for call which returns pointer");
 801   uint call_idx = call->_idx;
 802   if (call->is_Allocate()) {
 803     Node* k = call->in(AllocateNode::KlassNode);
 804     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
 805     assert(kt != NULL, "TypeKlassPtr required.");
 806     ciKlass* cik = kt->klass();
 807     PointsToNode::EscapeState es = PointsToNode::NoEscape;
 808     bool scalar_replaceable = true;
 809     if (call->is_AllocateArray()) {
 810       if (!cik->is_array_klass()) { // StressReflectiveCode
 811         es = PointsToNode::GlobalEscape;
 812       } else {
 813         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
 814         if (length < 0 || length > EliminateAllocationArraySizeLimit) {
 815           // Not scalar replaceable if the length is not constant or too big.
 816           scalar_replaceable = false;
 817         }
 818       }
 819     } else {  // Allocate instance
 820       if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||




 353       // Put Lock and Unlock nodes on IGVN worklist to process them during
 354       // first IGVN optimization when escape information is still available.
 355       record_for_optimizer(n);
 356     } else if (n->is_Allocate()) {
 357       add_call_node(n->as_Call());
 358       record_for_optimizer(n);
 359     } else {
 360       if (n->is_CallStaticJava()) {
 361         const char* name = n->as_CallStaticJava()->_name;
 362         if (name != NULL && strcmp(name, "uncommon_trap") == 0)
 363           return; // Skip uncommon traps
 364       }
 365       // Don't mark as processed since call's arguments have to be processed.
 366       delayed_worklist->push(n);
 367       // Check if a call returns an object.
 368       if ((n->as_Call()->returns_pointer() &&
 369            n->as_Call()->proj_out(TypeFunc::Parms) != NULL) ||
 370           (n->is_CallStaticJava() &&
 371            n->as_CallStaticJava()->is_boxing_method())) {
 372         add_call_node(n->as_Call());
 373       } else if (n->as_Call()->tf()->returns_value_type_as_fields()) {
 374         bool returns_oop = false;
 375         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
 376           ProjNode* pn = n->fast_out(i)->as_Proj();
 377           if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_oopptr()) {
 378             returns_oop = true;
 379           }
 380         }
 381         if (returns_oop) {
 382           add_call_node(n->as_Call());
 383         }
 384       }
 385     }
 386     return;
 387   }
 388   // Put this check here to process call arguments since some call nodes
 389   // point to phantom_obj.
 390   if (n_ptn == phantom_obj || n_ptn == null_obj)
 391     return; // Skip predefined nodes.
 392 
 393   int opcode = n->Opcode();
 394   switch (opcode) {
 395     case Op_AddP: {
 396       Node* base = get_addp_base(n);
 397       PointsToNode* ptn_base = ptnode_adr(base->_idx);
 398       // Field nodes are created for all field types. They are used in
 399       // adjust_scalar_replaceable_state() and split_unique_types().
 400       // Note, non-oop fields will have only base edges in Connection
 401       // Graph because such fields are not used for oop loads and stores.
 402       int offset = address_offset(n, igvn);
 403       add_field(n, PointsToNode::NoEscape, offset);
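
Note on the hunk above: with scalarized value-type returns, a call whose
signature is flagged by tf()->returns_value_type_as_fields() does not produce
a single pointer result at TypeFunc::Parms; each field of the returned value
comes back through its own projection at a slot >= TypeFunc::Parms. The added
loop therefore scans the call's projections and registers a call node as soon
as one oop-typed field projection is found, since that projection hands back a
heap reference the connection graph must model. A minimal standalone sketch of
that scan, using hypothetical simplified types rather than the HotSpot API:

    #include <vector>

    struct Proj { int con; bool is_oop; };    // stand-in for ProjNode
    struct Call { std::vector<Proj> outs; };  // stand-in for a call's projections

    // First result slot; field projections of a scalarized value-type
    // return occupy slots >= Parms.
    static const int Parms = 5;               // stand-in for TypeFunc::Parms

    bool returns_oop(const Call& call) {
      for (const Proj& pn : call.outs) {
        if (pn.con >= Parms && pn.is_oop) {
          return true;                        // found an oop field projection
        }
      }
      return false;
    }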


 468     case Op_PartialSubtypeCheck: {
 469       // Produces Null or notNull and is used only in CmpP so
 470       // phantom_obj could be used.
 471       map_ideal_node(n, phantom_obj); // Result is unknown
 472       break;
 473     }
 474     case Op_Phi: {
 475       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
 476       // ThreadLocal has RawPtr type.
 477       const Type* t = n->as_Phi()->type();
 478       if (t->make_ptr() != NULL) {
 479         add_local_var(n, PointsToNode::NoEscape);
 480         // Do not add edges during the first iteration because some
 481         // inputs may not be defined yet.
 482         delayed_worklist->push(n);
 483       }
 484       break;
 485     }
 486     case Op_Proj: {
 487       // we are only interested in the oop result projection from a call
 488       if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
 489           (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_oopptr())) {
 490         assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
 491                n->in(0)->as_Call()->tf()->returns_value_type_as_fields(), "what kind of oop return is it?");
 492         add_local_var_and_edge(n, PointsToNode::NoEscape,
 493                                n->in(0), delayed_worklist);
 494       }
 495       break;
 496     }
 497     case Op_Rethrow: // Exception object escapes
 498     case Op_Return: {
 499       if (n->req() > TypeFunc::Parms &&
 500           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
 501         // Treat Return value as LocalVar with GlobalEscape escape state.
 502         add_local_var_and_edge(n, PointsToNode::GlobalEscape,
 503                                n->in(TypeFunc::Parms), delayed_worklist);
 504       }
 505       break;
 506     }
 507     case Op_CompareAndExchangeP:
 508     case Op_CompareAndExchangeN:
 509     case Op_GetAndSetP:
 510     case Op_GetAndSetN: {
 511       add_objload_to_connection_graph(n, delayed_worklist);
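
The Op_Proj case above now accepts two shapes instead of one: the classic
single pointer result, which is exactly the projection at TypeFunc::Parms of a
call for which returns_pointer() holds, and an oop-typed field projection at a
slot >= TypeFunc::Parms of a call that returns a value type as fields. The new
assert spells out that any other oop projection reaching this point is a bug.
(The CompareAndExchange/GetAndSet cases directly above return the previous
value, which is why they register as object loads.) A hedged sketch of the
projection predicate, with simplified stand-in types:

    struct CallShape {
      bool returns_pointer;       // classic oop return
      bool returns_fields;        // tf()->returns_value_type_as_fields()
    };

    static const int Parms = 5;   // stand-in for TypeFunc::Parms

    bool legal_oop_proj(const CallShape& c, int con, bool proj_is_oop) {
      bool classic    = (con == Parms) && c.returns_pointer;
      bool scalarized = c.returns_fields && con >= Parms && proj_is_oop;
      return classic || scalarized;
    }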


 677       // ThreadLocal has RawPtr type.
 678       const Type* t = n->as_Phi()->type();
 679       if (t->make_ptr() != NULL) {
 680         for (uint i = 1; i < n->req(); i++) {
 681           Node* in = n->in(i);
 682           if (in == NULL)
 683             continue;  // ignore NULL
 684           Node* uncast_in = in->uncast();
 685           if (uncast_in->is_top() || uncast_in == n)
 686             continue;  // ignore top or inputs which go back to this node
 687           PointsToNode* ptn = ptnode_adr(in->_idx);
 688           assert(ptn != NULL, "node should be registered");
 689           add_edge(n_ptn, ptn);
 690         }
 691         break;
 692       }
 693       ELSE_FAIL("Op_Phi");
 694     }
 695     case Op_Proj: {
 696       // we are only interested in the oop result projection from a call
 697       if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
 698           (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_oopptr())) {
 699         assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
 700                n->in(0)->as_Call()->tf()->returns_value_type_as_fields(), "what kind of oop return is it?");
 701         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
 702         break;
 703       }
 704       ELSE_FAIL("Op_Proj");
 705     }
 706     case Op_Rethrow: // Exception object escapes
 707     case Op_Return: {
 708       if (n->req() > TypeFunc::Parms &&
 709           _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
 710         // Treat Return value as LocalVar with GlobalEscape escape state.
 711         add_local_var_and_edge(n, PointsToNode::GlobalEscape,
 712                                n->in(TypeFunc::Parms), NULL);
 713         break;
 714       }
 715       ELSE_FAIL("Op_Return");
 716     }
 717     case Op_StoreP:
 718     case Op_StoreN:
 719     case Op_StoreNKlass:
 720     case Op_StorePConditional:
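
This second switch belongs to the final-edges pass: the first pass above only
registers PointsToNodes and defers users via delayed_worklist, while this one
runs after every input is registered, so it adds edges directly (the worklist
argument is NULL) and hits ELSE_FAIL if a shape accepted by the first pass no
longer matches. The Phi case above shows the deferred work: every non-null
input that is not top and does not loop back to the Phi contributes an edge.
A minimal sketch of that loop with hypothetical stand-in types:

    #include <vector>

    struct PtNode { std::vector<PtNode*> edges; };   // stand-in PointsToNode
    struct PhiIn  { PtNode* ptn; bool is_top; };     // one (uncast) Phi input

    // Mirrors the loop above: skip NULL, top, and self inputs, then
    // add an edge for every remaining input (add_edge(n_ptn, ptn)).
    void add_phi_edges(PtNode* phi, const std::vector<PhiIn>& ins) {
      for (const PhiIn& in : ins) {
        if (in.ptn == nullptr || in.is_top || in.ptn == phi) {
          continue;                // ignore NULL, top, and back references
        }
        phi->edges.push_back(in.ptn);
      }
    }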


 795           PointsToNode* ptn = ptnode_adr(adr->_idx);
 796           assert(ptn != NULL, "node should be registered");
 797           add_edge(n_ptn, ptn);
 798         }
 799       }
 800       break;
 801     }
 802     default: {
 803       // This method should be called only for EA specific nodes which may
 804       // miss some edges when they were created.
 805 #ifdef ASSERT
 806       n->dump(1);
 807 #endif
 808       guarantee(false, "unknown node");
 809     }
 810   }
 811   return;
 812 }
 813 
 814 void ConnectionGraph::add_call_node(CallNode* call) {
 815   assert(call->returns_pointer() || call->tf()->returns_value_type_as_fields(), "only for call which returns pointer or fields");
 816   uint call_idx = call->_idx;
 817   if (call->is_Allocate()) {
 818     Node* k = call->in(AllocateNode::KlassNode);
 819     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
 820     assert(kt != NULL, "TypeKlassPtr required.");
 821     ciKlass* cik = kt->klass();
 822     PointsToNode::EscapeState es = PointsToNode::NoEscape;
 823     bool scalar_replaceable = true;
 824     if (call->is_AllocateArray()) {
 825       if (!cik->is_array_klass()) { // StressReflectiveCode
 826         es = PointsToNode::GlobalEscape;
 827       } else {
 828         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
 829         if (length < 0 || length > EliminateAllocationArraySizeLimit) {
 830           // Not scalar replaceable if the length is not constant or too big.
 831           scalar_replaceable = false;
 832         }
 833       }
 834     } else {  // Allocate instance
 835       if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
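
For Allocate calls, add_call_node picks an initial escape state and decides
scalar replaceability: an array with a non-constant length, or one longer than
EliminateAllocationArraySizeLimit, is kept but not scalar replaced; a
non-array klass reaching AllocateArray (possible under StressReflectiveCode)
is treated as GlobalEscape; and the truncated condition at the end opens the
instance case, whose visible first check is a subclass-of-Thread test. A
hedged sketch of the array part of that decision, with stand-in types and a
stand-in limit:

    enum Escape { NoEscape, GlobalEscape };

    struct AllocShape {
      bool is_array;
      bool klass_is_array;   // false in the StressReflectiveCode case
      int  const_length;     // -1 models "length is not a constant"
    };

    static const int kLengthLimit = 64;   // stand-in for the VM flag

    void classify(const AllocShape& a, Escape& es, bool& scalar_replaceable) {
      es = NoEscape;
      scalar_replaceable = true;
      if (a.is_array) {
        if (!a.klass_is_array) {
          es = GlobalEscape;                 // reflective stress case
        } else if (a.const_length < 0 || a.const_length > kLengthLimit) {
          scalar_replaceable = false;        // unknown or too-large length
        }
      }
    }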

