src/share/vm/opto/gcm.cpp

rev 5728 : 8029015: PPC64 (part 216): opto: trap based null and range checks


1313   // dominator tree of all USES of a value.  Pick the block with the least
1314   // loop nesting depth that is lowest in the dominator tree.
1315   // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() )
1316   schedule_late(visited, stack);
1317   if (C->failing()) {
1318     // schedule_late fails only when graph is incorrect.
1319     assert(!VerifyGraphEdges, "verification should have failed");
1320     return;
1321   }
1322 
1323 #ifndef PRODUCT
1324   if (trace_opto_pipelining()) {
1325     tty->print("\n---- Detect implicit null checks ----\n");
1326   }
1327 #endif
1328 
1329   // Detect implicit-null-check opportunities.  Basically, find NULL checks
1330   // with suitable memory ops nearby.  Use the memory op to do the NULL check.
1331   // I can generate a memory op if there is not one nearby.
1332   if (C->is_method_compilation()) {
1333     // Don't do it for natives, adapters, or runtime stubs
1334     int allowed_reasons = 0;
1335     // ...and don't do it when there have been too many traps, globally.
1336     for (int reason = (int)Deoptimization::Reason_none+1;
1337          reason < Compile::trapHistLength; reason++) {
1338       assert(reason < BitsPerInt, "recode bit map");
1339       if (!C->too_many_traps((Deoptimization::DeoptReason) reason))
1340         allowed_reasons |= nth_bit(reason);
1341     }
1342     // By reversing the loop direction we get a very minor gain on mpegaudio.
1343     // Feel free to revert to a forward loop for clarity.
1344     // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
1345     for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
1346       Node* proj = _matcher._null_check_tests[i];
1347       Node* val  = _matcher._null_check_tests[i + 1];
1348       Block* block = get_block_for_node(proj);
1349       implicit_null_check(block, proj, val, allowed_reasons);
1350       // The implicit_null_check will only perform the transformation
1351       // if the null branch is truly uncommon, *and* it leads to an
1352       // uncommon trap.  Combined with the too_many_traps guards
1353       // above, this prevents SEGV storms reported in 6366351,
1354       // by recompiling offending methods without this optimization.
1355     }
1356   }
1357 
1358 #ifndef PRODUCT
1359   if (trace_opto_pipelining()) {
1360     tty->print("\n---- Start Local Scheduling ----\n");
1361   }
1362 #endif
1363 
1364   // Schedule locally.  Right now a simple topological sort.
1365   // Later, do a real latency aware scheduler.
1366   GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
1367   visited.Clear();
1368   for (uint i = 0; i < number_of_blocks(); i++) {
1369     Block* block = get_block(i);
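
The pane above is the pre-patch code. The transformation implicit_null_check() performs replaces an explicit NULL-test branch with a nearby memory op: if the pointer really is null, the hardware fault from the dereference is caught by the JVM's SIGSEGV handler and routed to the uncommon-trap path. As a rough userspace analogy only (not HotSpot code; the JVM does this at the generated machine-code level, and a null dereference is undefined behavior in portable C++, so build this with optimizations off), a POSIX signal handler can route the fault to recovery code:

    #include <csetjmp>
    #include <csignal>
    #include <cstdio>

    static sigjmp_buf recover;

    static void on_segv(int) {
      siglongjmp(recover, 1);   // fault becomes a jump to the slow path
    }

    struct Obj { int field; };

    static int load_field(Obj* p) {
      if (sigsetjmp(recover, 1)) {
        return -1;              // "uncommon trap": reached only when p was null
      }
      return p->field;          // hot path: no explicit null test emitted
    }

    int main() {
      struct sigaction sa = {};
      sa.sa_handler = on_segv;
      sigaction(SIGSEGV, &sa, NULL);

      Obj o = { 42 };
      std::printf("%d\n", load_field(&o));   // prints 42
      std::printf("%d\n", load_field(NULL)); // prints -1, via the SIGSEGV handler
      return 0;
    }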
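The allowed_reasons mask built at lines 1334-1341 above folds each DeoptReason that has not yet trapped too often into one bit of an int, which is why the assert insists every reason index fits in BitsPerInt. A self-contained sketch of that pattern, with hypothetical stand-ins for the Deoptimization enum and Compile::trapHistLength (the real definitions live in deoptimization.hpp and compile.hpp):

    #include <cassert>

    // Hypothetical stand-ins for Deoptimization::DeoptReason and
    // Compile::trapHistLength.
    enum DeoptReason { Reason_none = 0, Reason_null_check, Reason_range_check,
                       Reason_count };

    const int BitsPerInt = 32;
    inline int nth_bit(int n) { return 1 << n; }

    // Mirrors the loop above: every reason that has not trapped too often
    // contributes one bit to the mask.
    int compute_allowed_reasons(bool (*too_many_traps)(DeoptReason)) {
      int allowed_reasons = 0;
      for (int reason = Reason_none + 1; reason < Reason_count; reason++) {
        assert(reason < BitsPerInt && "recode bit map");
        if (!too_many_traps((DeoptReason)reason))
          allowed_reasons |= nth_bit(reason);
      }
      return allowed_reasons;
    }

    // Consumer side: implicit_null_check() then needs only a single bit test.
    bool reason_is_allowed(int allowed_reasons, DeoptReason r) {
      return (allowed_reasons & nth_bit(r)) != 0;
    }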




1313   // dominator tree of all USES of a value.  Pick the block with the least
1314   // loop nesting depth that is lowest in the dominator tree.
1315   // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() )
1316   schedule_late(visited, stack);
1317   if (C->failing()) {
1318     // schedule_late fails only when graph is incorrect.
1319     assert(!VerifyGraphEdges, "verification should have failed");
1320     return;
1321   }
1322 
1323 #ifndef PRODUCT
1324   if (trace_opto_pipelining()) {
1325     tty->print("\n---- Detect implicit null checks ----\n");
1326   }
1327 #endif
1328 
1329   // Detect implicit-null-check opportunities.  Basically, find NULL checks
1330   // with suitable memory ops nearby.  Use the memory op to do the NULL check.
1331   // I can generate a memory op if there is not one nearby.
1332   if (C->is_method_compilation()) {









1333     // By reversing the loop direction we get a very minor gain on mpegaudio.
1334     // Feel free to revert to a forward loop for clarity.
1335     // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
1336     for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
1337       Node* proj = _matcher._null_check_tests[i];
1338       Node* val  = _matcher._null_check_tests[i + 1];
1339       Block* block = get_block_for_node(proj);
1340       implicit_null_check(block, proj, val, C->allowed_deopt_reasons());
1341       // The implicit_null_check will only perform the transformation
1342       // if the null branch is truly uncommon, *and* it leads to an
1343       // uncommon trap.  Combined with the too_many_traps guards
1344       // above, this prevents SEGV storms reported in 6366351,
1345       // by recompiling offending methods without this optimization.
1346     }
1347   }
1348 
1349 #ifndef PRODUCT
1350   if (trace_opto_pipelining()) {
1351     tty->print("\n---- Start Local Scheduling ----\n");
1352   }
1353 #endif
1354 
1355   // Schedule locally.  Right now a simple topological sort.
1356   // Later, do a real latency aware scheduler.
1357   GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
1358   visited.Clear();
1359   for (uint i = 0; i < number_of_blocks(); i++) {
1360     Block* block = get_block(i);
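
In the patched pane above, the per-reason loop is gone from gcm.cpp: the mask is computed once per compilation and fetched via C->allowed_deopt_reasons(), so the new PPC64 trap-based null and range checks can consult the same mask instead of rebuilding it. A hedged sketch of the shape that accessor plausibly takes (the actual field and initialization site live in compile.hpp/compile.cpp and may differ):

    // Sketch only: plausible shape of the refactor in compile.hpp.  The mask
    // is computed once per compilation, so gcm.cpp no longer rebuilds it and
    // other passes (e.g. trap-based range checks) can share it.
    class Compile {
      int _allowed_reasons;              // bit set of DeoptReasons still permitted
    public:
      int  allowed_deopt_reasons() const { return _allowed_reasons; }
      void set_allowed_deopt_reasons();  // runs the too_many_traps() loop once
    };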
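The "simple topological sort" mentioned at lines 1355-1357 is list scheduling driven by ready counts: ready_cnt tracks, per node, how many inputs are still unscheduled, and a node becomes ready once that count reaches zero. An illustrative standalone version of the idea (Kahn's algorithm; not the HotSpot implementation):

    #include <cstddef>
    #include <queue>
    #include <vector>

    // Nodes are 0..N-1; uses[n] lists the nodes that consume n's result.
    // ready_cnt[n] starts at n's number of unscheduled inputs.
    std::vector<int> topo_schedule(const std::vector<std::vector<int> >& uses,
                                   std::vector<int> ready_cnt) {
      std::queue<int> ready;
      for (std::size_t n = 0; n < ready_cnt.size(); n++)
        if (ready_cnt[n] == 0) ready.push((int)n);   // inputs all satisfied

      std::vector<int> order;
      while (!ready.empty()) {
        int n = ready.front(); ready.pop();
        order.push_back(n);                          // "emit" node n
        for (std::size_t u = 0; u < uses[n].size(); u++)
          if (--ready_cnt[uses[n][u]] == 0)          // last input just emitted
            ready.push(uses[n][u]);
      }
      return order;   // a topological order of the block's instructions
    }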