1375
// NOTE(review): fragment appears to be from HotSpot C2's PhaseIterGVN user-
// propagation code (opto/phaseX.cpp); the enclosing function header and the
// end of this loop are not visible here — confirm against the full file.
// Each 'use' of the just-changed node n is inspected for patterns whose
// value may improve now that n changed; matching nodes are pushed on
// _worklist for re-analysis.
1376 // Move users of node to worklist
1377 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1378 Node* use = n->fast_out(i); // Get use
1379
1380 if( use->is_Multi() || // Multi-definer? Push projs on worklist
1381 use->is_Store() ) // Enable store/load same address
1382 add_users_to_worklist0(use);
1383
1384 // If we changed the receiver type to a call, we need to revisit
1385 // the Catch following the call. It's looking for a non-NULL
1386 // receiver to know when to enable the regular fall-through path
1387 // in addition to the NullPtrException path.
1388 if (use->is_CallDynamicJava() && n == use->in(TypeFunc::Parms)) {
1389 Node* p = use->as_CallDynamicJava()->proj_out(TypeFunc::Control);
1390 if (p != NULL) {
1391 add_users_to_worklist0(p);
1392 }
1393 }
1394
1395 if( use->is_Cmp() ) { // Enable CMP/BOOL optimization
1396 add_users_to_worklist(use); // Put Bool on worklist
1397 // Look for the 'is_x2logic' pattern: "x ? : 0 : 1" and put the
1398 // phi merging either 0 or 1 onto the worklist
// Walk Cmp -> Bool -> If -> (two projections) -> common Region; only
// raw_out(0) is checked at each step, so only the first user on each
// level is considered for the x2logic pattern.
1399 if (use->outcnt() > 0) {
1400 Node* bol = use->raw_out(0);
1401 if (bol->outcnt() > 0) {
1402 Node* iff = bol->raw_out(0);
1403 if (iff->outcnt() == 2) {
1404 Node* ifproj0 = iff->raw_out(0);
1405 Node* ifproj1 = iff->raw_out(1);
1406 if (ifproj0->outcnt() > 0 && ifproj1->outcnt() > 0) {
1407 Node* region0 = ifproj0->raw_out(0);
1408 Node* region1 = ifproj1->raw_out(0);
1409 if( region0 == region1 )
1410 add_users_to_worklist0(region0);
1411 }
1412 }
1413 }
1414 }
1415 }
1416
1417 uint use_op = use->Opcode();
1418 // If changed Cast input, check Phi users for simple cycles
1419 if( use->is_ConstraintCast() || use->is_CheckCastPP() ) {
1420 for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1421 Node* u = use->fast_out(i2);
1422 if (u->is_Phi())
1423 _worklist.push(u);
1424 }
1425 }
1426 // If changed LShift inputs, check RShift users for useless sign-ext
1427 if( use_op == Op_LShiftI ) {
1428 for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1429 Node* u = use->fast_out(i2);
1430 if (u->Opcode() == Op_RShiftI)
1431 _worklist.push(u);
1432 }
1433 }
1434 // If changed AddI/SubI inputs, check CmpU for range check optimization.
1435 if (use_op == Op_AddI || use_op == Op_SubI) {
1436 for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
// NOTE(review): fragment is truncated here — the body of this inner loop
// and the remainder of the outer loop continue past this view.
1437 Node* u = use->fast_out(i2);
|
1375
// NOTE(review): this fragment duplicates the block above with two additions
// (CountedLoopEnd limit handling and CmpI->CastII reprocessing) — it looks
// like two versions of the same code were concatenated during extraction;
// confirm which version is current before editing either.
1376 // Move users of node to worklist
1377 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1378 Node* use = n->fast_out(i); // Get use
1379
1380 if( use->is_Multi() || // Multi-definer? Push projs on worklist
1381 use->is_Store() ) // Enable store/load same address
1382 add_users_to_worklist0(use);
1383
1384 // If we changed the receiver type to a call, we need to revisit
1385 // the Catch following the call. It's looking for a non-NULL
1386 // receiver to know when to enable the regular fall-through path
1387 // in addition to the NullPtrException path.
1388 if (use->is_CallDynamicJava() && n == use->in(TypeFunc::Parms)) {
1389 Node* p = use->as_CallDynamicJava()->proj_out(TypeFunc::Control);
1390 if (p != NULL) {
1391 add_users_to_worklist0(p);
1392 }
1393 }
1394
1395 uint use_op = use->Opcode();
1396 if(use->is_Cmp()) { // Enable CMP/BOOL optimization
1397 add_users_to_worklist(use); // Put Bool on worklist
// Walk Cmp -> Bool -> If via raw_out(0); the If is then checked for
// two distinct patterns: a CountedLoopEnd whose limit is n, or the
// x2logic diamond (two projections meeting at one Region).
1398 if (use->outcnt() > 0) {
1399 Node* bol = use->raw_out(0);
1400 if (bol->outcnt() > 0) {
1401 Node* iff = bol->raw_out(0);
1402 if (use_op == Op_CmpI &&
1403 iff->is_CountedLoopEnd()) {
1404 CountedLoopEndNode* cle = iff->as_CountedLoopEnd();
1405 if (cle->limit() == n && cle->phi() != NULL) {
1406 // If an opaque node feeds into the limit condition of a
1407 // CountedLoop, we need to process the Phi node for the
1408 // induction variable when the opaque node is removed:
1409 // the range of values taken by the Phi is now known and
1410 // so its type is also known.
1411 _worklist.push(cle->phi());
1412 }
1413 } else if (iff->outcnt() == 2) {
1414 // Look for the 'is_x2logic' pattern: "x ? : 0 : 1" and put the
1415 // phi merging either 0 or 1 onto the worklist
1416 Node* ifproj0 = iff->raw_out(0);
1417 Node* ifproj1 = iff->raw_out(1);
1418 if (ifproj0->outcnt() > 0 && ifproj1->outcnt() > 0) {
1419 Node* region0 = ifproj0->raw_out(0);
1420 Node* region1 = ifproj1->raw_out(0);
1421 if( region0 == region1 )
1422 add_users_to_worklist0(region0);
1423 }
1424 }
1425 }
1426 }
1427 if (use_op == Op_CmpI) {
1428 Node* in1 = use->in(1);
// NOTE(review): this inner loop's 'uint i' shadows the outer
// DUIterator_Fast 'i' from line 1377 — it compiles, but is easy to
// misread; consider renaming in a follow-up.
1429 for (uint i = 0; i < in1->outcnt(); i++) {
1430 if (in1->raw_out(i)->Opcode() == Op_CastII) {
1431 Node* castii = in1->raw_out(i);
1432 if (castii->in(0) != NULL && castii->in(0)->in(0) != NULL && castii->in(0)->in(0)->is_If()) {
1433 Node* ifnode = castii->in(0)->in(0);
1434 if (ifnode->in(1) != NULL && ifnode->in(1)->in(1) == use) {
1435 // Reprocess a CastII node that may depend on an
1436 // opaque node value when the opaque node is
1437 // removed. In case it carries a dependency we can do
1438 // a better job of computing its type.
1439 _worklist.push(castii);
1440 }
1441 }
1442 }
1443 }
1444 }
1445 }
1446
1447 // If changed Cast input, check Phi users for simple cycles
1448 if( use->is_ConstraintCast() || use->is_CheckCastPP() ) {
1449 for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1450 Node* u = use->fast_out(i2);
1451 if (u->is_Phi())
1452 _worklist.push(u);
1453 }
1454 }
1455 // If changed LShift inputs, check RShift users for useless sign-ext
1456 if( use_op == Op_LShiftI ) {
1457 for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1458 Node* u = use->fast_out(i2);
1459 if (u->Opcode() == Op_RShiftI)
1460 _worklist.push(u);
1461 }
1462 }
1463 // If changed AddI/SubI inputs, check CmpU for range check optimization.
1464 if (use_op == Op_AddI || use_op == Op_SubI) {
1465 for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
// NOTE(review): fragment is truncated here — the body of this inner loop
// and the remainder of the outer loop continue past this view.
1466 Node* u = use->fast_out(i2);
|