src/share/vm/opto/escape.cpp
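The hunks below are all instances of one mechanical change: C2 opcode constants, previously plain integers named Op_XXX, are now referenced through a scoped Opcodes type (apparently an enum class), so every use becomes Opcodes::Op_XXX and locals holding an opcode are retyped from int/uint to Opcodes. A minimal, self-contained sketch of the before/after pattern, assuming an abbreviated stand-in for the real opcode list in opcodes.hpp:

    #include <cstdio>

    // Illustrative only: the real Opcodes type enumerates every C2 node opcode.
    enum class Opcodes { Op_Node, Op_AddP, Op_CmpP, Op_CmpN };

    // With a scoped enum the values must be qualified and no longer convert
    // to int implicitly, so stale or mixed-up comparisons fail to compile.
    static const char* name_of(Opcodes op) {
      switch (op) {
        case Opcodes::Op_CmpP: return "CmpP";
        case Opcodes::Op_CmpN: return "CmpN";
        default:               return "other";
      }
    }

    int main() {
      Opcodes op = Opcodes::Op_CmpP;   // was: int op = Op_CmpP;
      std::printf("%s\n", name_of(op));
      return 0;
    }

A scoped enumeration keeps the constants out of the enclosing namespace and removes the implicit conversion to int, so mixing an opcode with an unrelated integer constant becomes a compile-time error rather than a silent bug.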

*** 158,176 ****
      if (n->is_MergeMem()) {
        // Collect all MergeMem nodes to add memory slices for
        // scalar replaceable objects in split_unique_types().
        _mergemem_worklist.append(n->as_MergeMem());
      } else if (OptimizePtrCompare && n->is_Cmp() &&
!                (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
        // Collect compare pointers nodes.
        ptr_cmp_worklist.append(n);
      } else if (n->is_MemBarStoreStore()) {
        // Collect all MemBarStoreStore nodes so that depending on the
        // escape status of the associated Allocate node some of them
        // may be eliminated.
        storestore_worklist.append(n);
!     } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) &&
                 (n->req() > MemBarNode::Precedent)) {
        record_for_optimizer(n);
  #ifdef ASSERT
      } else if (n->is_AddP()) {
        // Collect address nodes for graph verification.
--- 158,176 ----
      if (n->is_MergeMem()) {
        // Collect all MergeMem nodes to add memory slices for
        // scalar replaceable objects in split_unique_types().
        _mergemem_worklist.append(n->as_MergeMem());
      } else if (OptimizePtrCompare && n->is_Cmp() &&
!                (n->Opcode() == Opcodes::Op_CmpP || n->Opcode() == Opcodes::Op_CmpN)) {
        // Collect compare pointers nodes.
        ptr_cmp_worklist.append(n);
      } else if (n->is_MemBarStoreStore()) {
        // Collect all MemBarStoreStore nodes so that depending on the
        // escape status of the associated Allocate node some of them
        // may be eliminated.
        storestore_worklist.append(n);
!     } else if (n->is_MemBar() && (n->Opcode() == Opcodes::Op_MemBarRelease) &&
                 (n->req() > MemBarNode::Precedent)) {
        record_for_optimizer(n);
  #ifdef ASSERT
      } else if (n->is_AddP()) {
        // Collect address nodes for graph verification.
*** 377,389 ****
    // Put this check here to process call arguments since some call nodes
    // point to phantom_obj.
    if (n_ptn == phantom_obj || n_ptn == null_obj)
      return; // Skip predefined nodes.
  
!   int opcode = n->Opcode();
    switch (opcode) {
!     case Op_AddP: {
        Node* base = get_addp_base(n);
        PointsToNode* ptn_base = ptnode_adr(base->_idx);
        // Field nodes are created for all field types. They are used in
        // adjust_scalar_replaceable_state() and split_unique_types().
        // Note, non-oop fields will have only base edges in Connection
--- 377,389 ----
    // Put this check here to process call arguments since some call nodes
    // point to phantom_obj.
    if (n_ptn == phantom_obj || n_ptn == null_obj)
      return; // Skip predefined nodes.
  
!   Opcodes opcode = n->Opcode();
    switch (opcode) {
!     case Opcodes::Op_AddP: {
        Node* base = get_addp_base(n);
        PointsToNode* ptn_base = ptnode_adr(base->_idx);
        // Field nodes are created for all field types. They are used in
        // adjust_scalar_replaceable_state() and split_unique_types().
        // Note, non-oop fields will have only base edges in Connection
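Every opcode switch in the file follows the same recipe as the hunk above: the scrutinee is retyped from int to Opcodes and each case label gains the Opcodes:: qualifier, while the case bodies are untouched. A compilable sketch of that shape, with hypothetical handler bodies standing in for the real connection-graph calls:

    #include <cstdio>

    enum class Opcodes { Op_AddP, Op_CastX2P, Op_Parm };  // abbreviated, illustrative

    // Mirrors the shape of add_node_to_connection_graph() after the change:
    // the local is an Opcodes value and every case label is fully qualified.
    static void classify(Opcodes opcode) {
      switch (opcode) {
        case Opcodes::Op_AddP: {
          std::puts("field address");    // stands in for add_base(...)
          break;
        }
        case Opcodes::Op_CastX2P:        // both map to the phantom object,
        case Opcodes::Op_Parm: {         // as in the real code
          std::puts("phantom object");
          break;
        }
        default:
          break;                         // nodes not related to EA are ignored
      }
    }

    int main() {
      classify(Opcodes::Op_AddP);
      return 0;
    }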
*** 396,429 ****
          n_ptn = ptnode_adr(n_idx);
          add_base(n_ptn->as_Field(), ptn_base);
        }
        break;
      }
!     case Op_CastX2P: {
        map_ideal_node(n, phantom_obj);
        break;
      }
!     case Op_CastPP:
!     case Op_CheckCastPP:
!     case Op_EncodeP:
!     case Op_DecodeN:
!     case Op_EncodePKlass:
!     case Op_DecodeNKlass: {
        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1),
                               delayed_worklist);
        break;
      }
!     case Op_CMoveP: {
        add_local_var(n, PointsToNode::NoEscape);
        // Do not add edges during first iteration because some could be
        // not defined yet.
        delayed_worklist->push(n);
        break;
      }
!     case Op_ConP:
!     case Op_ConN:
!     case Op_ConNKlass: {
        // assume all oop constants globally escape except for null
        PointsToNode::EscapeState es;
        const Type* t = igvn->type(n);
        if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
          es = PointsToNode::NoEscape;
--- 396,429 ----
          n_ptn = ptnode_adr(n_idx);
          add_base(n_ptn->as_Field(), ptn_base);
        }
        break;
      }
!     case Opcodes::Op_CastX2P: {
        map_ideal_node(n, phantom_obj);
        break;
      }
!     case Opcodes::Op_CastPP:
!     case Opcodes::Op_CheckCastPP:
!     case Opcodes::Op_EncodeP:
!     case Opcodes::Op_DecodeN:
!     case Opcodes::Op_EncodePKlass:
!     case Opcodes::Op_DecodeNKlass: {
        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1),
                               delayed_worklist);
        break;
      }
!     case Opcodes::Op_CMoveP: {
        add_local_var(n, PointsToNode::NoEscape);
        // Do not add edges during first iteration because some could be
        // not defined yet.
        delayed_worklist->push(n);
        break;
      }
!     case Opcodes::Op_ConP:
!     case Opcodes::Op_ConN:
!     case Opcodes::Op_ConNKlass: {
        // assume all oop constants globally escape except for null
        PointsToNode::EscapeState es;
        const Type* t = igvn->type(n);
        if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
          es = PointsToNode::NoEscape;
*** 431,468 ****
          es = PointsToNode::GlobalEscape;
        }
        add_java_object(n, es);
        break;
      }
!     case Op_CreateEx: {
        // assume that all exception objects globally escape
        map_ideal_node(n, phantom_obj);
        break;
      }
!     case Op_LoadKlass:
!     case Op_LoadNKlass: {
        // Unknown class is loaded
        map_ideal_node(n, phantom_obj);
        break;
      }
!     case Op_LoadP:
!     case Op_LoadN:
!     case Op_LoadPLocked: {
        add_objload_to_connection_graph(n, delayed_worklist);
        break;
      }
!     case Op_Parm: {
        map_ideal_node(n, phantom_obj);
        break;
      }
!     case Op_PartialSubtypeCheck: {
        // Produces Null or notNull and is used in only in CmpP so
        // phantom_obj could be used.
        map_ideal_node(n, phantom_obj); // Result is unknown
        break;
      }
!     case Op_Phi: {
        // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
        // ThreadLocal has RawPtr type.
        const Type* t = n->as_Phi()->type();
        if (t->make_ptr() != NULL) {
          add_local_var(n, PointsToNode::NoEscape);
--- 431,468 ----
          es = PointsToNode::GlobalEscape;
        }
        add_java_object(n, es);
        break;
      }
!     case Opcodes::Op_CreateEx: {
        // assume that all exception objects globally escape
        map_ideal_node(n, phantom_obj);
        break;
      }
!     case Opcodes::Op_LoadKlass:
!     case Opcodes::Op_LoadNKlass: {
        // Unknown class is loaded
        map_ideal_node(n, phantom_obj);
        break;
      }
!     case Opcodes::Op_LoadP:
!     case Opcodes::Op_LoadN:
!     case Opcodes::Op_LoadPLocked: {
        add_objload_to_connection_graph(n, delayed_worklist);
        break;
      }
!     case Opcodes::Op_Parm: {
        map_ideal_node(n, phantom_obj);
        break;
      }
!     case Opcodes::Op_PartialSubtypeCheck: {
        // Produces Null or notNull and is used in only in CmpP so
        // phantom_obj could be used.
        map_ideal_node(n, phantom_obj); // Result is unknown
        break;
      }
!     case Opcodes::Op_Phi: {
        // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
        // ThreadLocal has RawPtr type.
        const Type* t = n->as_Phi()->type();
        if (t->make_ptr() != NULL) {
          add_local_var(n, PointsToNode::NoEscape);
*** 470,521 ****
          // not defined yet.
          delayed_worklist->push(n);
        }
        break;
      }
!     case Op_Proj: {
        // we are only interested in the oop result projection from a call
        if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
            n->in(0)->as_Call()->returns_pointer()) {
          add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0),
                                 delayed_worklist);
        }
        break;
      }
!     case Op_Rethrow: // Exception object escapes
!     case Op_Return: {
        if (n->req() > TypeFunc::Parms &&
            igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
          // Treat Return value as LocalVar with GlobalEscape escape state.
          add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                                 n->in(TypeFunc::Parms), delayed_worklist);
        }
        break;
      }
!     case Op_CompareAndExchangeP:
!     case Op_CompareAndExchangeN:
!     case Op_GetAndSetP:
!     case Op_GetAndSetN: {
        add_objload_to_connection_graph(n, delayed_worklist);
        // fallthrough
      }
!     case Op_StoreP:
!     case Op_StoreN:
!     case Op_StoreNKlass:
!     case Op_StorePConditional:
!     case Op_WeakCompareAndSwapP:
!     case Op_WeakCompareAndSwapN:
!     case Op_CompareAndSwapP:
!     case Op_CompareAndSwapN: {
        Node* adr = n->in(MemNode::Address);
        const Type *adr_type = igvn->type(adr);
        adr_type = adr_type->make_ptr();
        if (adr_type == NULL) {
          break; // skip dead nodes
        }
        if (adr_type->isa_oopptr() ||
!           (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) &&
            (adr_type == TypeRawPtr::NOTNULL &&
             adr->in(AddPNode::Address)->is_Proj() &&
             adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
          delayed_worklist->push(n); // Process it later.
  #ifdef ASSERT
--- 470,521 ----
          // not defined yet.
          delayed_worklist->push(n);
        }
        break;
      }
!     case Opcodes::Op_Proj: {
        // we are only interested in the oop result projection from a call
        if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
            n->in(0)->as_Call()->returns_pointer()) {
          add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0),
                                 delayed_worklist);
        }
        break;
      }
!     case Opcodes::Op_Rethrow: // Exception object escapes
!     case Opcodes::Op_Return: {
        if (n->req() > TypeFunc::Parms &&
            igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
          // Treat Return value as LocalVar with GlobalEscape escape state.
          add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                                 n->in(TypeFunc::Parms), delayed_worklist);
        }
        break;
      }
!     case Opcodes::Op_CompareAndExchangeP:
!     case Opcodes::Op_CompareAndExchangeN:
!     case Opcodes::Op_GetAndSetP:
!     case Opcodes::Op_GetAndSetN: {
        add_objload_to_connection_graph(n, delayed_worklist);
        // fallthrough
      }
!     case Opcodes::Op_StoreP:
!     case Opcodes::Op_StoreN:
!     case Opcodes::Op_StoreNKlass:
!     case Opcodes::Op_StorePConditional:
!     case Opcodes::Op_WeakCompareAndSwapP:
!     case Opcodes::Op_WeakCompareAndSwapN:
!     case Opcodes::Op_CompareAndSwapP:
!     case Opcodes::Op_CompareAndSwapN: {
        Node* adr = n->in(MemNode::Address);
        const Type *adr_type = igvn->type(adr);
        adr_type = adr_type->make_ptr();
        if (adr_type == NULL) {
          break; // skip dead nodes
        }
        if (adr_type->isa_oopptr() ||
!           (opcode == Opcodes::Op_StoreP || opcode == Opcodes::Op_StoreN || opcode == Opcodes::Op_StoreNKlass) &&
            (adr_type == TypeRawPtr::NOTNULL &&
             adr->in(AddPNode::Address)->is_Proj() &&
             adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
          delayed_worklist->push(n); // Process it later.
  #ifdef ASSERT
*** 529,549 ****
      } else {
        // Ignore copy the displaced header to the BoxNode (OSR compilation).
        if (adr->is_BoxLock())
          break;
        // Stored value escapes in unsafe access.
!       if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
          // Pointer stores in G1 barriers looks like unsafe access.
          // Ignore such stores to be able scalar replace non-escaping
          // allocations.
          if (UseG1GC && adr->is_AddP()) {
            Node* base = get_addp_base(adr);
!           if (base->Opcode() == Op_LoadP &&
                base->in(MemNode::Address)->is_AddP()) {
              adr = base->in(MemNode::Address);
              Node* tls = get_addp_base(adr);
!             if (tls->Opcode() == Op_ThreadLocal) {
                int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
                if (offs == in_bytes(JavaThread::satb_mark_queue_offset() +
                                     SATBMarkQueue::byte_offset_of_buf())) {
                  break; // G1 pre barrier previous oop value store.
                }
--- 529,549 ----
      } else {
        // Ignore copy the displaced header to the BoxNode (OSR compilation).
        if (adr->is_BoxLock())
          break;
        // Stored value escapes in unsafe access.
!       if ((opcode == Opcodes::Op_StoreP) && adr_type->isa_rawptr()) {
          // Pointer stores in G1 barriers looks like unsafe access.
          // Ignore such stores to be able scalar replace non-escaping
          // allocations.
          if (UseG1GC && adr->is_AddP()) {
            Node* base = get_addp_base(adr);
!           if (base->Opcode() == Opcodes::Op_LoadP &&
                base->in(MemNode::Address)->is_AddP()) {
              adr = base->in(MemNode::Address);
              Node* tls = get_addp_base(adr);
!             if (tls->Opcode() == Opcodes::Op_ThreadLocal) {
                int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
                if (offs == in_bytes(JavaThread::satb_mark_queue_offset() +
                                     SATBMarkQueue::byte_offset_of_buf())) {
                  break; // G1 pre barrier previous oop value store.
                }
--- 529,549 ----
*** 562,585 ****
        assert(false, "not unsafe or G1 barrier raw StoreP");
  #endif
      }
      break;
    }
!     case Op_AryEq:
!     case Op_HasNegatives:
!     case Op_StrComp:
!     case Op_StrEquals:
!     case Op_StrIndexOf:
!     case Op_StrIndexOfChar:
!     case Op_StrInflatedCopy:
!     case Op_StrCompressedCopy:
!     case Op_EncodeISOArray: {
        add_local_var(n, PointsToNode::ArgEscape);
        delayed_worklist->push(n); // Process it later.
        break;
      }
!     case Op_ThreadLocal: {
        add_java_object(n, PointsToNode::ArgEscape);
        break;
      }
      default:
        ; // Do nothing for nodes not related to EA.
--- 562,585 ----
        assert(false, "not unsafe or G1 barrier raw StoreP");
  #endif
      }
      break;
    }
!     case Opcodes::Op_AryEq:
!     case Opcodes::Op_HasNegatives:
!     case Opcodes::Op_StrComp:
!     case Opcodes::Op_StrEquals:
!     case Opcodes::Op_StrIndexOf:
!     case Opcodes::Op_StrIndexOfChar:
!     case Opcodes::Op_StrInflatedCopy:
!     case Opcodes::Op_StrCompressedCopy:
!     case Opcodes::Op_EncodeISOArray: {
        add_local_var(n, PointsToNode::ArgEscape);
        delayed_worklist->push(n); // Process it later.
        break;
      }
!     case Opcodes::Op_ThreadLocal: {
        add_java_object(n, PointsToNode::ArgEscape);
        break;
      }
      default:
        ; // Do nothing for nodes not related to EA.
*** 611,640 ****
      return;
    }
    assert(n->is_Store() || n->is_LoadStore() ||
           (n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
           "node should be registered already");
!   int opcode = n->Opcode();
    switch (opcode) {
!     case Op_AddP: {
        Node* base = get_addp_base(n);
        PointsToNode* ptn_base = ptnode_adr(base->_idx);
        assert(ptn_base != NULL, "field's base should be registered");
        add_base(n_ptn->as_Field(), ptn_base);
        break;
      }
!     case Op_CastPP:
!     case Op_CheckCastPP:
!     case Op_EncodeP:
!     case Op_DecodeN:
!     case Op_EncodePKlass:
!     case Op_DecodeNKlass: {
        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL);
        break;
      }
!     case Op_CMoveP: {
        for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
          Node* in = n->in(i);
          if (in == NULL)
            continue; // ignore NULL
          Node* uncast_in = in->uncast();
--- 611,640 ----
      return;
    }
    assert(n->is_Store() || n->is_LoadStore() ||
           (n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
           "node should be registered already");
!   Opcodes opcode = n->Opcode();
    switch (opcode) {
!     case Opcodes::Op_AddP: {
        Node* base = get_addp_base(n);
        PointsToNode* ptn_base = ptnode_adr(base->_idx);
        assert(ptn_base != NULL, "field's base should be registered");
        add_base(n_ptn->as_Field(), ptn_base);
        break;
      }
!     case Opcodes::Op_CastPP:
!     case Opcodes::Op_CheckCastPP:
!     case Opcodes::Op_EncodeP:
!     case Opcodes::Op_DecodeN:
!     case Opcodes::Op_EncodePKlass:
!     case Opcodes::Op_DecodeNKlass: {
        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL);
        break;
      }
!     case Opcodes::Op_CMoveP: {
        for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
          Node* in = n->in(i);
          if (in == NULL)
            continue; // ignore NULL
          Node* uncast_in = in->uncast();
*** 644,667 ****
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
        break;
      }
!     case Op_LoadP:
!     case Op_LoadN:
!     case Op_LoadPLocked: {
        // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
        // ThreadLocal has RawPtr type.
        const Type* t = _igvn->type(n);
        if (t->make_ptr() != NULL) {
          Node* adr = n->in(MemNode::Address);
          add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
          break;
        }
        ELSE_FAIL("Op_LoadP");
      }
!     case Op_Phi: {
        // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
        // ThreadLocal has RawPtr type.
        const Type* t = n->as_Phi()->type();
        if (t->make_ptr() != NULL) {
          for (uint i = 1; i < n->req(); i++) {
--- 644,667 ----
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
        break;
      }
!     case Opcodes::Op_LoadP:
!     case Opcodes::Op_LoadN:
!     case Opcodes::Op_LoadPLocked: {
        // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
        // ThreadLocal has RawPtr type.
        const Type* t = _igvn->type(n);
        if (t->make_ptr() != NULL) {
          Node* adr = n->in(MemNode::Address);
          add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
          break;
        }
        ELSE_FAIL("Op_LoadP");
      }
!     case Opcodes::Op_Phi: {
        // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
        // ThreadLocal has RawPtr type.
        const Type* t = n->as_Phi()->type();
        if (t->make_ptr() != NULL) {
          for (uint i = 1; i < n->req(); i++) {
*** 677,734 ****
          }
          break;
        }
        ELSE_FAIL("Op_Phi");
      }
!     case Op_Proj: {
        // we are only interested in the oop result projection from a call
        if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
            n->in(0)->as_Call()->returns_pointer()) {
          add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
          break;
        }
        ELSE_FAIL("Op_Proj");
      }
!     case Op_Rethrow: // Exception object escapes
!     case Op_Return: {
        if (n->req() > TypeFunc::Parms &&
            _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
          // Treat Return value as LocalVar with GlobalEscape escape state.
          add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                                 n->in(TypeFunc::Parms), NULL);
          break;
        }
        ELSE_FAIL("Op_Return");
      }
!     case Op_StoreP:
!     case Op_StoreN:
!     case Op_StoreNKlass:
!     case Op_StorePConditional:
!     case Op_CompareAndExchangeP:
!     case Op_CompareAndExchangeN:
!     case Op_CompareAndSwapP:
!     case Op_CompareAndSwapN:
!     case Op_WeakCompareAndSwapP:
!     case Op_WeakCompareAndSwapN:
!     case Op_GetAndSetP:
!     case Op_GetAndSetN: {
        Node* adr = n->in(MemNode::Address);
        const Type *adr_type = _igvn->type(adr);
        adr_type = adr_type->make_ptr();
  #ifdef ASSERT
        if (adr_type == NULL) {
          n->dump(1);
          assert(adr_type != NULL, "dead node should not be on list");
          break;
        }
  #endif
!       if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||
!           opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) {
          add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
        }
        if (adr_type->isa_oopptr() ||
!           (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) &&
            (adr_type == TypeRawPtr::NOTNULL &&
             adr->in(AddPNode::Address)->is_Proj() &&
             adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
          // Point Address to Value
          PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
--- 677,734 ----
          }
          break;
        }
        ELSE_FAIL("Op_Phi");
      }
!     case Opcodes::Op_Proj: {
        // we are only interested in the oop result projection from a call
        if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
            n->in(0)->as_Call()->returns_pointer()) {
          add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
          break;
        }
        ELSE_FAIL("Op_Proj");
      }
!     case Opcodes::Op_Rethrow: // Exception object escapes
!     case Opcodes::Op_Return: {
        if (n->req() > TypeFunc::Parms &&
            _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
          // Treat Return value as LocalVar with GlobalEscape escape state.
          add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                                 n->in(TypeFunc::Parms), NULL);
          break;
        }
        ELSE_FAIL("Op_Return");
      }
!     case Opcodes::Op_StoreP:
!     case Opcodes::Op_StoreN:
!     case Opcodes::Op_StoreNKlass:
!     case Opcodes::Op_StorePConditional:
!     case Opcodes::Op_CompareAndExchangeP:
!     case Opcodes::Op_CompareAndExchangeN:
!     case Opcodes::Op_CompareAndSwapP:
!     case Opcodes::Op_CompareAndSwapN:
!     case Opcodes::Op_WeakCompareAndSwapP:
!     case Opcodes::Op_WeakCompareAndSwapN:
!     case Opcodes::Op_GetAndSetP:
!     case Opcodes::Op_GetAndSetN: {
        Node* adr = n->in(MemNode::Address);
        const Type *adr_type = _igvn->type(adr);
        adr_type = adr_type->make_ptr();
  #ifdef ASSERT
        if (adr_type == NULL) {
          n->dump(1);
          assert(adr_type != NULL, "dead node should not be on list");
          break;
        }
  #endif
!       if (opcode == Opcodes::Op_GetAndSetP || opcode == Opcodes::Op_GetAndSetN ||
!           opcode == Opcodes::Op_CompareAndExchangeN || opcode == Opcodes::Op_CompareAndExchangeP) {
          add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
        }
        if (adr_type->isa_oopptr() ||
!           (opcode == Opcodes::Op_StoreP || opcode == Opcodes::Op_StoreN || opcode == Opcodes::Op_StoreNKlass) &&
            (adr_type == TypeRawPtr::NOTNULL &&
             adr->in(AddPNode::Address)->is_Proj() &&
             adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
          // Point Address to Value
          PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
*** 737,747 ****
          Node *val = n->in(MemNode::ValueIn);
          PointsToNode* ptn = ptnode_adr(val->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(adr_ptn, ptn);
          break;
!       } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
          // Stored value escapes in unsafe access.
          Node *val = n->in(MemNode::ValueIn);
          PointsToNode* ptn = ptnode_adr(val->_idx);
          assert(ptn != NULL, "node should be registered");
          set_escape_state(ptn, PointsToNode::GlobalEscape);
--- 737,747 ----
          Node *val = n->in(MemNode::ValueIn);
          PointsToNode* ptn = ptnode_adr(val->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(adr_ptn, ptn);
          break;
!       } else if ((opcode == Opcodes::Op_StoreP) && adr_type->isa_rawptr()) {
          // Stored value escapes in unsafe access.
          Node *val = n->in(MemNode::ValueIn);
          PointsToNode* ptn = ptnode_adr(val->_idx);
          assert(ptn != NULL, "node should be registered");
          set_escape_state(ptn, PointsToNode::GlobalEscape);
*** 754,772 ****
        }
        break;
      }
      ELSE_FAIL("Op_StoreP");
    }
!     case Op_AryEq:
!     case Op_HasNegatives:
!     case Op_StrComp:
!     case Op_StrEquals:
!     case Op_StrIndexOf:
!     case Op_StrIndexOfChar:
!     case Op_StrInflatedCopy:
!     case Op_StrCompressedCopy:
!     case Op_EncodeISOArray: {
        // char[]/byte[] arrays passed to string intrinsic do not escape but
        // they are not scalar replaceable. Adjust escape state for them.
        // Start from in(2) edge since in(1) is memory edge.
        for (uint i = 2; i < n->req(); i++) {
          Node* adr = n->in(i);
--- 754,772 ----
        }
        break;
      }
      ELSE_FAIL("Op_StoreP");
    }
!     case Opcodes::Op_AryEq:
!     case Opcodes::Op_HasNegatives:
!     case Opcodes::Op_StrComp:
!     case Opcodes::Op_StrEquals:
!     case Opcodes::Op_StrIndexOf:
!     case Opcodes::Op_StrIndexOfChar:
!     case Opcodes::Op_StrInflatedCopy:
!     case Opcodes::Op_StrCompressedCopy:
!     case Opcodes::Op_EncodeISOArray: {
        // char[]/byte[] arrays passed to string intrinsic do not escape but
        // they are not scalar replaceable. Adjust escape state for them.
        // Start from in(2) edge since in(1) is memory edge.
        for (uint i = 2; i < n->req(); i++) {
          Node* adr = n->in(i);
*** 902,936 ****
        }
      }
    } else {
      // An other type of call, assume the worst case:
      // returned value is unknown and globally escapes.
!     assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
      map_ideal_node(call, phantom_obj);
    }
  }
  
  void ConnectionGraph::process_call_arguments(CallNode *call) {
    bool is_arraycopy = false;
    switch (call->Opcode()) {
  #ifdef ASSERT
!     case Op_Allocate:
!     case Op_AllocateArray:
!     case Op_Lock:
!     case Op_Unlock:
        assert(false, "should be done already");
        break;
  #endif
!     case Op_ArrayCopy:
!     case Op_CallLeafNoFP:
        // Most array copies are ArrayCopy nodes at this point but there
        // are still a few direct calls to the copy subroutines (See
        // PhaseStringOpts::copy_string())
!       is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
          call->as_CallLeaf()->is_call_to_arraycopystub();
        // fall through
!     case Op_CallLeaf: {
        // Stub calls, objects do not escape but they are not scale replaceable.
        // Adjust escape state for outgoing arguments.
        const TypeTuple * d = call->tf()->domain();
        bool src_has_oops = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
--- 902,936 ----
        }
      }
    } else {
      // An other type of call, assume the worst case:
      // returned value is unknown and globally escapes.
!     assert(call->Opcode() == Opcodes::Op_CallDynamicJava, "add failed case check");
      map_ideal_node(call, phantom_obj);
    }
  }
  
  void ConnectionGraph::process_call_arguments(CallNode *call) {
    bool is_arraycopy = false;
    switch (call->Opcode()) {
  #ifdef ASSERT
!     case Opcodes::Op_Allocate:
!     case Opcodes::Op_AllocateArray:
!     case Opcodes::Op_Lock:
!     case Opcodes::Op_Unlock:
        assert(false, "should be done already");
        break;
  #endif
!     case Opcodes::Op_ArrayCopy:
!     case Opcodes::Op_CallLeafNoFP:
        // Most array copies are ArrayCopy nodes at this point but there
        // are still a few direct calls to the copy subroutines (See
        // PhaseStringOpts::copy_string())
!       is_arraycopy = (call->Opcode() == Opcodes::Op_ArrayCopy) ||
          call->as_CallLeaf()->is_call_to_arraycopystub();
        // fall through
!     case Opcodes::Op_CallLeaf: {
        // Stub calls, objects do not escape but they are not scale replaceable.
        // Adjust escape state for outgoing arguments.
        const TypeTuple * d = call->tf()->domain();
        bool src_has_oops = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
*** 1042,1052 ****
          }
        }
      }
      break;
    }
!     case Op_CallStaticJava: {
        // For a static call, we know exactly what method is being called.
        // Use bytecode estimator to record the call's escape affects
  #ifdef ASSERT
        const char* name = call->as_CallStaticJava()->_name;
        assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
--- 1042,1052 ----
          }
        }
      }
      break;
    }
!     case Opcodes::Op_CallStaticJava: {
        // For a static call, we know exactly what method is being called.
        // Use bytecode estimator to record the call's escape affects
  #ifdef ASSERT
        const char* name = call->as_CallStaticJava()->_name;
        assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
*** 1870,1880 ****
      Node *n = ptr_cmp_worklist.pop();
      Node *res = optimize_ptr_compare(n);
      if (res != NULL) {
  #ifndef PRODUCT
        if (PrintOptimizePtrCompare) {
!         tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ"));
          if (Verbose) {
            n->dump(1);
          }
        }
  #endif
--- 1870,1880 ----
      Node *n = ptr_cmp_worklist.pop();
      Node *res = optimize_ptr_compare(n);
      if (res != NULL) {
  #ifndef PRODUCT
        if (PrintOptimizePtrCompare) {
!         tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Opcodes::Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ"));
          if (Verbose) {
            n->dump(1);
          }
        }
  #endif
*** 1895,1905 ****
      Node *n = storestore_worklist.pop();
      MemBarStoreStoreNode *storestore = n ->as_MemBarStoreStore();
      Node *alloc = storestore->in(MemBarNode::Precedent)->in(0);
      assert (alloc->is_Allocate(), "storestore should point to AllocateNode");
      if (not_global_escape(alloc)) {
!       MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
        mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
        mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
        igvn->register_new_node_with_optimizer(mb);
        igvn->replace_node(storestore, mb);
      }
--- 1895,1905 ----
      Node *n = storestore_worklist.pop();
      MemBarStoreStoreNode *storestore = n ->as_MemBarStoreStore();
      Node *alloc = storestore->in(MemBarNode::Precedent)->in(0);
      assert (alloc->is_Allocate(), "storestore should point to AllocateNode");
      if (not_global_escape(alloc)) {
!       MemBarNode* mb = MemBarNode::make(C, Opcodes::Op_MemBarCPUOrder, Compile::AliasIdxBot);
        mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
        mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
        igvn->register_new_node_with_optimizer(mb);
        igvn->replace_node(storestore, mb);
      }
*** 2065,2075 ****
      ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
      if (field != NULL) {
        bt = field->layout_type();
      } else {
        // Check for unsafe oop field access
!       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN)) {
          bt = T_OBJECT;
          (*unsafe) = true;
        }
      }
    } else if (adr_type->isa_aryptr()) {
--- 2065,2075 ----
      ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
      if (field != NULL) {
        bt = field->layout_type();
      } else {
        // Check for unsafe oop field access
!       if (n->has_out_with(Opcodes::Op_StoreP, Opcodes::Op_LoadP, Opcodes::Op_StoreN, Opcodes::Op_LoadN)) {
          bt = T_OBJECT;
          (*unsafe) = true;
        }
      }
    } else if (adr_type->isa_aryptr()) {
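The conversion also reaches helpers that take opcodes as arguments: Node::has_out_with() in the hunk above and MemBarNode::make() in an earlier hunk are now called with Opcodes values, which implies their declarations (not shown in this webrev) were retyped to match. A hedged sketch of such a signature change, with simplified stand-in types rather than the real node classes:

    enum class Opcodes { Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN };

    struct Node {
      // Hypothetical rendering of the old overload: bool has_out_with(int, int, int, int);
      // After the conversion the parameters must be Opcodes, or calls such as
      // n->has_out_with(Opcodes::Op_StoreP, ...) would not compile.
      bool has_out_with(Opcodes opc1, Opcodes opc2, Opcodes opc3, Opcodes opc4) {
        (void)opc1; (void)opc2; (void)opc3; (void)opc4;  // silence unused warnings
        return false;  // stub; the real method scans this node's out-edges
      }
    };

    int main() {
      Node n;
      return n.has_out_with(Opcodes::Op_StoreP, Opcodes::Op_LoadP,
                            Opcodes::Op_StoreN, Opcodes::Op_LoadN) ? 1 : 0;
    }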
*** 2081,2091 ****
        const Type* elemtype = adr_type->isa_aryptr()->elem();
        bt = elemtype->array_element_basic_type();
      }
    } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
      // Allocation initialization, ThreadLocal field access, unsafe access
!     if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN)) {
        bt = T_OBJECT;
      }
    }
  }
  return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);
--- 2081,2091 ----
        const Type* elemtype = adr_type->isa_aryptr()->elem();
        bt = elemtype->array_element_basic_type();
      }
    } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
      // Allocation initialization, ThreadLocal field access, unsafe access
!     if (n->has_out_with(Opcodes::Op_StoreP, Opcodes::Op_LoadP, Opcodes::Op_StoreN, Opcodes::Op_LoadN)) {
        bt = T_OBJECT;
      }
    }
  }
  return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);
*** 2305,2317 ****
      // Case #6 (unsafe access) may have several chained AddP nodes.
      assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
      base = base->in(AddPNode::Address);
    }
    Node* uncast_base = base->uncast();
!   int opcode = uncast_base->Opcode();
!   assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
!          opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
           (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
           (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
  }
  return base;
  }
--- 2305,2317 ----
      // Case #6 (unsafe access) may have several chained AddP nodes.
      assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
      base = base->in(AddPNode::Address);
    }
    Node* uncast_base = base->uncast();
!   Opcodes opcode = uncast_base->Opcode();
!   assert(opcode == Opcodes::Op_ConP || opcode == Opcodes::Op_ThreadLocal ||
!          opcode == Opcodes::Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
           (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
           (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
  }
  return base;
  }
*** 2619,2629 ****
        igvn->hash_insert(use);
        record_for_optimizer(use);
        --i;
  #ifdef ASSERT
      } else if (use->is_Mem()) {
!       if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) {
          // Don't move related cardmark.
          continue;
        }
        // Memory nodes should have new memory input.
        tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
--- 2619,2629 ----
        igvn->hash_insert(use);
        record_for_optimizer(use);
        --i;
  #ifdef ASSERT
      } else if (use->is_Mem()) {
!       if (use->Opcode() == Opcodes::Op_StoreCM && use->in(MemNode::OopStore) == n) {
          // Don't move related cardmark.
          continue;
        }
        // Memory nodes should have new memory input.
        tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
*** 2735,2752 ****
        // Can not bypass initialization of the instance
        // we are looking for.
        break;
      }
      // Otherwise skip it (the call updated 'result' value).
!   } else if (result->Opcode() == Op_SCMemProj) {
      Node* mem = result->in(0);
      Node* adr = NULL;
      if (mem->is_LoadStore()) {
        adr = mem->in(MemNode::Address);
      } else {
!       assert(mem->Opcode() == Op_EncodeISOArray ||
!              mem->Opcode() == Op_StrCompressedCopy, "sanity");
        adr = mem->in(3); // Memory edge corresponds to destination array
      }
      const Type *at = igvn->type(adr);
      if (at != Type::TOP) {
        assert(at->isa_ptr() != NULL, "pointer type required.");
--- 2735,2752 ----
        // Can not bypass initialization of the instance
        // we are looking for.
        break;
      }
      // Otherwise skip it (the call updated 'result' value).
!   } else if (result->Opcode() == Opcodes::Op_SCMemProj) {
      Node* mem = result->in(0);
      Node* adr = NULL;
      if (mem->is_LoadStore()) {
        adr = mem->in(MemNode::Address);
      } else {
!       assert(mem->Opcode() == Opcodes::Op_EncodeISOArray ||
!              mem->Opcode() == Opcodes::Op_StrCompressedCopy, "sanity");
        adr = mem->in(3); // Memory edge corresponds to destination array
      }
      const Type *at = igvn->type(adr);
      if (at != Type::TOP) {
        assert(at->isa_ptr() != NULL, "pointer type required.");
*** 2756,2766 ****
          assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
          break; // In product mode return SCMemProj node
        }
      }
      result = mem->in(MemNode::Memory);
!   } else if (result->Opcode() == Op_StrInflatedCopy) {
      Node* adr = result->in(3); // Memory edge corresponds to destination array
      const Type *at = igvn->type(adr);
      if (at != Type::TOP) {
        assert(at->isa_ptr() != NULL, "pointer type required.");
        int idx = C->get_alias_index(at->is_ptr());
--- 2756,2766 ----
          assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
          break; // In product mode return SCMemProj node
        }
      }
      result = mem->in(MemNode::Memory);
!   } else if (result->Opcode() == Opcodes::Op_StrInflatedCopy) {
      Node* adr = result->in(3); // Memory edge corresponds to destination array
      const Type *at = igvn->type(adr);
      if (at != Type::TOP) {
        assert(at->isa_ptr() != NULL, "pointer type required.");
        int idx = C->get_alias_index(at->is_ptr());
*** 3041,3051 ****
        if (!split_AddP(n, base)) continue; // wrong type from dead path
      } else if (n->is_Phi() ||
                 n->is_CheckCastPP() ||
                 n->is_EncodeP() ||
                 n->is_DecodeN() ||
!                (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
        if (visited.test_set(n->_idx)) {
          assert(n->is_Phi(), "loops only through Phi's");
          continue; // already processed
        }
        JavaObjectNode* jobj = unique_java_object(n);
--- 3041,3051 ----
        if (!split_AddP(n, base)) continue; // wrong type from dead path
      } else if (n->is_Phi() ||
                 n->is_CheckCastPP() ||
                 n->is_EncodeP() ||
                 n->is_DecodeN() ||
!                (n->is_ConstraintCast() && n->Opcode() == Opcodes::Op_CastPP)) {
        if (visited.test_set(n->_idx)) {
          assert(n->is_Phi(), "loops only through Phi's");
          continue; // already processed
        }
        JavaObjectNode* jobj = unique_java_object(n);
*** 3111,3121 ****
          alloc_worklist.append_if_missing(use);
        } else if (use->is_Phi() ||
                   use->is_CheckCastPP() ||
                   use->is_EncodeNarrowPtr() ||
                   use->is_DecodeNarrowPtr() ||
!                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
          alloc_worklist.append_if_missing(use);
  #ifdef ASSERT
        } else if (use->is_Mem()) {
          assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
        } else if (use->is_MergeMem()) {
--- 3111,3121 ----
          alloc_worklist.append_if_missing(use);
        } else if (use->is_Phi() ||
                   use->is_CheckCastPP() ||
                   use->is_EncodeNarrowPtr() ||
                   use->is_DecodeNarrowPtr() ||
!                  (use->is_ConstraintCast() && use->Opcode() == Opcodes::Op_CastPP)) {
          alloc_worklist.append_if_missing(use);
  #ifdef ASSERT
        } else if (use->is_Mem()) {
          assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
        } else if (use->is_MergeMem()) {
*** 3125,3150 ****
          // (through CheckCastPP nodes) even for debug info.
          Node* m = use->in(TypeFunc::Memory);
          if (m->is_MergeMem()) {
            assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
          }
!       } else if (use->Opcode() == Op_EncodeISOArray) {
          if (use->in(MemNode::Memory) == n || use->in(3) == n) {
            // EncodeISOArray overwrites destination array
            memnode_worklist.append_if_missing(use);
          }
        } else {
!         uint op = use->Opcode();
          if ((use->in(MemNode::Memory) == n) &&
!             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
            // They overwrite memory edge corresponding to destination array,
            memnode_worklist.append_if_missing(use);
!         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
!                      op == Op_CastP2X || op == Op_StoreCM ||
!                      op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
!                      op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
!                      op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
            n->dump();
            use->dump();
            assert(false, "EA: missing allocation reference path");
          }
  #endif
--- 3125,3150 ----
          // (through CheckCastPP nodes) even for debug info.
          Node* m = use->in(TypeFunc::Memory);
          if (m->is_MergeMem()) {
            assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
          }
!       } else if (use->Opcode() == Opcodes::Op_EncodeISOArray) {
          if (use->in(MemNode::Memory) == n || use->in(3) == n) {
            // EncodeISOArray overwrites destination array
            memnode_worklist.append_if_missing(use);
          }
        } else {
!         Opcodes op = use->Opcode();
          if ((use->in(MemNode::Memory) == n) &&
!             (op == Opcodes::Op_StrCompressedCopy || op == Opcodes::Op_StrInflatedCopy)) {
            // They overwrite memory edge corresponding to destination array,
            memnode_worklist.append_if_missing(use);
!         } else if (!(op == Opcodes::Op_CmpP || op == Opcodes::Op_Conv2B ||
!                      op == Opcodes::Op_CastP2X || op == Opcodes::Op_StoreCM ||
!                      op == Opcodes::Op_FastLock || op == Opcodes::Op_AryEq || op == Opcodes::Op_StrComp || op == Opcodes::Op_HasNegatives ||
!                      op == Opcodes::Op_StrCompressedCopy || op == Opcodes::Op_StrInflatedCopy ||
!                      op == Opcodes::Op_StrEquals || op == Opcodes::Op_StrIndexOf || op == Opcodes::Op_StrIndexOfChar)) {
            n->dump();
            use->dump();
            assert(false, "EA: missing allocation reference path");
          }
  #endif
*** 3202,3216 ****
      } else if (n->is_MemBar()) { // Initialize, MemBar nodes
        // we don't need to do anything, but the users must be pushed
        n = n->as_MemBar()->proj_out(TypeFunc::Memory);
        if (n == NULL)
          continue;
!     } else if (n->Opcode() == Op_StrCompressedCopy ||
!                n->Opcode() == Op_EncodeISOArray) {
        // get the memory projection
!       n = n->find_out_with(Op_SCMemProj);
!       assert(n->Opcode() == Op_SCMemProj, "memory projection required");
      } else {
        assert(n->is_Mem(), "memory node required.");
        Node *addr = n->in(MemNode::Address);
        const Type *addr_t = igvn->type(addr);
        if (addr_t == Type::TOP)
--- 3202,3216 ----
      } else if (n->is_MemBar()) { // Initialize, MemBar nodes
        // we don't need to do anything, but the users must be pushed
        n = n->as_MemBar()->proj_out(TypeFunc::Memory);
        if (n == NULL)
          continue;
!     } else if (n->Opcode() == Opcodes::Op_StrCompressedCopy ||
!                n->Opcode() == Opcodes::Op_EncodeISOArray) {
        // get the memory projection
!       n = n->find_out_with(Opcodes::Op_SCMemProj);
!       assert(n->Opcode() == Opcodes::Op_SCMemProj, "memory projection required");
      } else {
        assert(n->is_Mem(), "memory node required.");
        Node *addr = n->in(MemNode::Address);
        const Type *addr_t = igvn->type(addr);
        if (addr_t == Type::TOP)
*** 3229,3249 ****
      }
      if (n->is_Load()) {
        continue; // don't push users
      } else if (n->is_LoadStore()) {
        // get the memory projection
!       n = n->find_out_with(Op_SCMemProj);
!       assert(n->Opcode() == Op_SCMemProj, "memory projection required");
      }
    }
    // push user on appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Phi() || use->is_ClearArray()) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
!       if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
          continue;
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
--- 3229,3249 ----
      }
      if (n->is_Load()) {
        continue; // don't push users
      } else if (n->is_LoadStore()) {
        // get the memory projection
!       n = n->find_out_with(Opcodes::Op_SCMemProj);
!       assert(n->Opcode() == Opcodes::Op_SCMemProj, "memory projection required");
      }
    }
    // push user on appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Phi() || use->is_ClearArray()) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
!       if (use->Opcode() == Opcodes::Op_StoreCM) // Ignore cardmark stores
          continue;
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
*** 3251,3277 ****
  #ifdef ASSERT
      } else if(use->is_Mem()) {
        assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
      } else if (use->is_MergeMem()) {
        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
!     } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites destination array
          memnode_worklist.append_if_missing(use);
        }
      } else {
!       uint op = use->Opcode();
        if ((use->in(MemNode::Memory) == n) &&
!           (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
          // They overwrite memory edge corresponding to destination array,
          memnode_worklist.append_if_missing(use);
!       } else if (!(op == Op_StoreCM ||
!                    (op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL &&
!                     strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) ||
!                    op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
!                    op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
!                    op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
          n->dump();
          use->dump();
          assert(false, "EA: missing memory path");
        }
  #endif
--- 3251,3277 ----
  #ifdef ASSERT
      } else if(use->is_Mem()) {
        assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
      } else if (use->is_MergeMem()) {
        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
!     } else if (use->Opcode() == Opcodes::Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites destination array
          memnode_worklist.append_if_missing(use);
        }
      } else {
!       Opcodes op = use->Opcode();
        if ((use->in(MemNode::Memory) == n) &&
!           (op == Opcodes::Op_StrCompressedCopy || op == Opcodes::Op_StrInflatedCopy)) {
          // They overwrite memory edge corresponding to destination array,
          memnode_worklist.append_if_missing(use);
!       } else if (!(op == Opcodes::Op_StoreCM ||
!                    (op == Opcodes::Op_CallLeaf && use->as_CallLeaf()->_name != NULL &&
!                     strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) ||
!                    op == Opcodes::Op_AryEq || op == Opcodes::Op_StrComp || op == Opcodes::Op_HasNegatives ||
!                    op == Opcodes::Op_StrCompressedCopy || op == Opcodes::Op_StrInflatedCopy ||
!                    op == Opcodes::Op_StrEquals || op == Opcodes::Op_StrIndexOf || op == Opcodes::Op_StrIndexOfChar)) {
          n->dump();
          use->dump();
          assert(false, "EA: missing memory path");
        }
  #endif