< prev index next >

src/share/vm/opto/phaseX.cpp

Print this page

        

*** 106,116 **** if( !k ) { // ?Miss? NOT_PRODUCT( _lookup_misses++ ); return NULL; // Miss! } ! int op = n->Opcode(); uint req = n->req(); while( 1 ) { // While probing hash table if( k->req() == req && // Same count of inputs k->Opcode() == op ) { // Same Opcode for( uint i=0; i<req; i++ ) --- 106,116 ---- if( !k ) { // ?Miss? NOT_PRODUCT( _lookup_misses++ ); return NULL; // Miss! } ! Opcodes op = n->Opcode(); uint req = n->req(); while( 1 ) { // While probing hash table if( k->req() == req && // Same count of inputs k->Opcode() == op ) { // Same Opcode for( uint i=0; i<req; i++ )
*** 158,168 **** } else if( k == _sentinel ) { first_sentinel = key; // Can insert here } ! int op = n->Opcode(); uint req = n->req(); while( 1 ) { // While probing hash table if( k->req() == req && // Same count of inputs k->Opcode() == op ) { // Same Opcode for( uint i=0; i<req; i++ ) --- 158,168 ---- } else if( k == _sentinel ) { first_sentinel = key; // Can insert here } ! Opcodes op = n->Opcode(); uint req = n->req(); while( 1 ) { // While probing hash table if( k->req() == req && // Same count of inputs k->Opcode() == op ) { // Same Opcode for( uint i=0; i<req; i++ )
*** 928,939 **** // make more progress because the uses were made while the Phis and Regions // were in half-built states. Put all uses of Phis and Regions on worklist. max = _worklist.size(); for( uint j = 0; j < max; j++ ) { Node *n = _worklist.at(j); ! uint uop = n->Opcode(); ! if( uop == Op_Phi || uop == Op_Region || n->is_Type() || n->is_Mem() ) add_users_to_worklist(n); } } --- 928,939 ---- // make more progress because the uses were made while the Phis and Regions // were in half-built states. Put all uses of Phis and Regions on worklist. max = _worklist.size(); for( uint j = 0; j < max; j++ ) { Node *n = _worklist.at(j); ! Opcodes uop = n->Opcode(); ! if( uop == Opcodes::Op_Phi || uop == Opcodes::Op_Region || n->is_Type() || n->is_Mem() ) add_users_to_worklist(n); } }
*** 1352,1362 **** recurse = true; } else if (in->outcnt() == 1 && in->has_special_unique_user()) { _worklist.push(in->unique_out()); } else if (in->outcnt() <= 2 && dead->is_Phi()) { ! if (in->Opcode() == Op_Region) { _worklist.push(in); } else if (in->is_Store()) { DUIterator_Fast imax, i = in->fast_outs(imax); _worklist.push(in->fast_out(i)); i++; --- 1352,1362 ---- recurse = true; } else if (in->outcnt() == 1 && in->has_special_unique_user()) { _worklist.push(in->unique_out()); } else if (in->outcnt() <= 2 && dead->is_Phi()) { ! if (in->Opcode() == Opcodes::Op_Region) { _worklist.push(in); } else if (in->is_Store()) { DUIterator_Fast imax, i = in->fast_outs(imax); _worklist.push(in->fast_out(i)); i++;
*** 1513,1523 **** if (p != NULL) { add_users_to_worklist0(p); } } ! uint use_op = use->Opcode(); if(use->is_Cmp()) { // Enable CMP/BOOL optimization add_users_to_worklist(use); // Put Bool on worklist if (use->outcnt() > 0) { Node* bol = use->raw_out(0); if (bol->outcnt() > 0) { --- 1513,1523 ---- if (p != NULL) { add_users_to_worklist0(p); } } ! Opcodes use_op = use->Opcode(); if(use->is_Cmp()) { // Enable CMP/BOOL optimization add_users_to_worklist(use); // Put Bool on worklist if (use->outcnt() > 0) { Node* bol = use->raw_out(0); if (bol->outcnt() > 0) {
*** 1534,1544 **** add_users_to_worklist0(region0); } } } } ! if (use_op == Op_CmpI) { Node* phi = countedloop_phi_from_cmp((CmpINode*)use, n); if (phi != NULL) { // If an opaque node feeds into the limit condition of a // CountedLoop, we need to process the Phi node for the // induction variable when the opaque node is removed: --- 1534,1544 ---- add_users_to_worklist0(region0); } } } } ! if (use_op == Opcodes::Op_CmpI) { Node* phi = countedloop_phi_from_cmp((CmpINode*)use, n); if (phi != NULL) { // If an opaque node feeds into the limit condition of a // CountedLoop, we need to process the Phi node for the // induction variable when the opaque node is removed:
*** 1546,1556 **** // so its type is also known. _worklist.push(phi); } Node* in1 = use->in(1); for (uint i = 0; i < in1->outcnt(); i++) { ! if (in1->raw_out(i)->Opcode() == Op_CastII) { Node* castii = in1->raw_out(i); if (castii->in(0) != NULL && castii->in(0)->in(0) != NULL && castii->in(0)->in(0)->is_If()) { Node* ifnode = castii->in(0)->in(0); if (ifnode->in(1) != NULL && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == use) { // Reprocess a CastII node that may depend on an --- 1546,1556 ---- // so its type is also known. _worklist.push(phi); } Node* in1 = use->in(1); for (uint i = 0; i < in1->outcnt(); i++) { ! if (in1->raw_out(i)->Opcode() == Opcodes::Op_CastII) { Node* castii = in1->raw_out(i); if (castii->in(0) != NULL && castii->in(0)->in(0) != NULL && castii->in(0)->in(0)->is_If()) { Node* ifnode = castii->in(0)->in(0); if (ifnode->in(1) != NULL && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == use) { // Reprocess a CastII node that may depend on an
*** 1572,1614 **** if (u->is_Phi()) _worklist.push(u); } } // If changed LShift inputs, check RShift users for useless sign-ext ! if( use_op == Op_LShiftI ) { for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { Node* u = use->fast_out(i2); ! if (u->Opcode() == Op_RShiftI) _worklist.push(u); } } // If changed AddI/SubI inputs, check CmpU for range check optimization. ! if (use_op == Op_AddI || use_op == Op_SubI) { for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { Node* u = use->fast_out(i2); ! if (u->is_Cmp() && (u->Opcode() == Op_CmpU)) { _worklist.push(u); } } } // If changed AddP inputs, check Stores for loop invariant ! if( use_op == Op_AddP ) { for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { Node* u = use->fast_out(i2); if (u->is_Mem()) _worklist.push(u); } } // If changed initialization activity, check dependent Stores ! if (use_op == Op_Allocate || use_op == Op_AllocateArray) { InitializeNode* init = use->as_Allocate()->initialization(); if (init != NULL) { Node* imem = init->proj_out(TypeFunc::Memory); if (imem != NULL) add_users_to_worklist0(imem); } } ! if (use_op == Op_Initialize) { Node* imem = use->as_Initialize()->proj_out(TypeFunc::Memory); if (imem != NULL) add_users_to_worklist0(imem); } } } --- 1572,1614 ---- if (u->is_Phi()) _worklist.push(u); } } // If changed LShift inputs, check RShift users for useless sign-ext ! if( use_op == Opcodes::Op_LShiftI ) { for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { Node* u = use->fast_out(i2); ! if (u->Opcode() == Opcodes::Op_RShiftI) _worklist.push(u); } } // If changed AddI/SubI inputs, check CmpU for range check optimization. ! if (use_op == Opcodes::Op_AddI || use_op == Opcodes::Op_SubI) { for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { Node* u = use->fast_out(i2); ! if (u->is_Cmp() && (u->Opcode() == Opcodes::Op_CmpU)) { _worklist.push(u); } } } // If changed AddP inputs, check Stores for loop invariant ! if( use_op == Opcodes::Op_AddP ) { for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { Node* u = use->fast_out(i2); if (u->is_Mem()) _worklist.push(u); } } // If changed initialization activity, check dependent Stores ! if (use_op == Opcodes::Op_Allocate || use_op == Opcodes::Op_AllocateArray) { InitializeNode* init = use->as_Allocate()->initialization(); if (init != NULL) { Node* imem = init->proj_out(TypeFunc::Memory); if (imem != NULL) add_users_to_worklist0(imem); } } ! if (use_op == Opcodes::Op_Initialize) { Node* imem = use->as_Initialize()->proj_out(TypeFunc::Memory); if (imem != NULL) add_users_to_worklist0(imem); } } }
*** 1720,1745 **** // CmpU nodes can get their type information from two nodes up in the // graph (instead of from the nodes immediately above). Make sure they // are added to the worklist if nodes they depend on are updated, since // they could be missed and get wrong types otherwise. ! uint m_op = m->Opcode(); ! if (m_op == Op_AddI || m_op == Op_SubI) { for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) { Node* p = m->fast_out(i2); // Propagate changes to uses ! if (p->Opcode() == Op_CmpU) { // Got a CmpU which might need the new type information from node n. if(p->bottom_type() != type(p)) { // If not already bottomed out worklist.push(p); // Propagate change to user } } } } // If n is used in a counted loop exit condition then the type // of the counted loop's Phi depends on the type of n. See // PhiNode::Value(). ! if (m_op == Op_CmpI) { PhiNode* phi = countedloop_phi_from_cmp((CmpINode*)m, n); if (phi != NULL) { worklist.push(phi); } } --- 1720,1745 ---- // CmpU nodes can get their type information from two nodes up in the // graph (instead of from the nodes immediately above). Make sure they // are added to the worklist if nodes they depend on are updated, since // they could be missed and get wrong types otherwise. ! Opcodes m_op = m->Opcode(); ! if (m_op == Opcodes::Op_AddI || m_op == Opcodes::Op_SubI) { for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) { Node* p = m->fast_out(i2); // Propagate changes to uses ! if (p->Opcode() == Opcodes::Op_CmpU) { // Got a CmpU which might need the new type information from node n. if(p->bottom_type() != type(p)) { // If not already bottomed out worklist.push(p); // Propagate change to user } } } } // If n is used in a counted loop exit condition then the type // of the counted loop's Phi depends on the type of n. See // PhiNode::Value(). ! if (m_op == Opcodes::Op_CmpI) { PhiNode* phi = countedloop_phi_from_cmp((CmpINode*)m, n); if (phi != NULL) { worklist.push(phi); } }
*** 1835,1853 **** _worklist.push(n); // n re-enters the hash table via the worklist } // TEMPORARY fix to ensure that 2nd GVN pass eliminates NULL checks switch( n->Opcode() ) { ! case Op_FastLock: // Revisit FastLocks for lock coarsening ! case Op_If: ! case Op_CountedLoopEnd: ! case Op_Region: ! case Op_Loop: ! case Op_CountedLoop: ! case Op_Conv2B: ! case Op_Opaque1: ! case Op_Opaque2: _worklist.push(n); break; default: break; } --- 1835,1853 ---- _worklist.push(n); // n re-enters the hash table via the worklist } // TEMPORARY fix to ensure that 2nd GVN pass eliminates NULL checks switch( n->Opcode() ) { ! case Opcodes::Op_FastLock: // Revisit FastLocks for lock coarsening ! case Opcodes::Op_If: ! case Opcodes::Op_CountedLoopEnd: ! case Opcodes::Op_Region: ! case Opcodes::Op_Loop: ! case Opcodes::Op_CountedLoop: ! case Opcodes::Op_Conv2B: ! case Opcodes::Op_Opaque1: ! case Opcodes::Op_Opaque2: _worklist.push(n); break; default: break; }
*** 1989,2003 **** igvn->add_users_to_worklist( old ); break; case 2: if( old->is_Store() ) igvn->add_users_to_worklist( old ); ! if( old->Opcode() == Op_Region ) igvn->_worklist.push(old); break; case 3: ! if( old->Opcode() == Op_Region ) { igvn->_worklist.push(old); igvn->add_users_to_worklist( old ); } break; default: --- 1989,2003 ---- igvn->add_users_to_worklist( old ); break; case 2: if( old->is_Store() ) igvn->add_users_to_worklist( old ); ! if( old->Opcode() == Opcodes::Op_Region ) igvn->_worklist.push(old); break; case 3: ! if( old->Opcode() == Opcodes::Op_Region ) { igvn->_worklist.push(old); igvn->add_users_to_worklist( old ); } break; default:
< prev index next >