src/share/vm/opto/bytecodeInfo.cpp

rev 3904 : 8005071: Incremental inlining for JSR 292
Summary: post-parse inlining driven by the number of live nodes.
Reviewed-by:

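The heart of the change is a new `bool& should_delay` out-parameter threaded through the inlining heuristics: when a budget check (DesiredMethodLimit, inline depth, node count) fails for a force_inline callee while -XX:+IncrementalInline is set, the call site is deferred to a post-parse inlining pass instead of being rejected outright. The standalone sketch below models that three-way decision; the enum and parameter names are stand-ins for illustration, not VM code.

    // Standalone model of the delay-or-reject pattern used at each budget
    // check in try_to_inline(); illustrative only, not HotSpot code.
    #include <cstdio>

    enum class Verdict { InlineNow, Reject, Delay };

    Verdict budget_check(bool over_budget,
                         bool force_inline,            // callee marked @ForceInline
                         bool incremental_inline,      // -XX:+IncrementalInline
                         bool inlining_incrementally)  // already in the post-parse pass
    {
      if (over_budget) {
        if (!force_inline || !incremental_inline) {
          return Verdict::Reject;     // ordinary callee: give up, as before
        } else if (!inlining_incrementally) {
          return Verdict::Delay;      // retry post-parse, once dead nodes are gone
        }
        // else: the post-parse pass must not defer again, so fall through
      }
      return Verdict::InlineNow;
    }

    int main() {
      // A force-inlined callee that is over budget during initial parsing:
      Verdict v = budget_check(true, true, true, false);
      std::printf("%s\n", v == Verdict::Delay ? "delayed" : "decided now");
      return 0;
    }

The same guard shape recurs at every limit check in the hunks below.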
@@ -44,11 +44,12 @@
   _caller_jvms(caller_jvms),
   _caller_tree((InlineTree*) caller_tree),
   _method(callee),
   _site_invoke_ratio(site_invoke_ratio),
   _max_inline_level(max_inline_level),
-  _count_inline_bcs(method()->code_size_for_inlining())
+  _count_inline_bcs(method()->code_size_for_inlining()),
+  _subtrees(c->comp_arena(), 2, 0, NULL)
 {
   NOT_PRODUCT(_count_inlines = 0;)
   if (_caller_jvms != NULL) {
     // Keep a private copy of the caller_jvms:
     _caller_jvms = new (C) JVMState(caller_jvms->method(), caller_tree->caller_jvms());

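The only new state in the constructor is `_subtrees`: each InlineTree now records its child trees in a growable array allocated in the compilation arena (the arguments read as arena, initial capacity 2, initial length 0, NULL filler, their usual GrowableArray meaning), so that a post-parse pass can revisit the tree. A rough stand-in for this bookkeeping, using a plain vector rather than the VM's GrowableArray:

    // Rough stand-in for the new bookkeeping (not the VM's GrowableArray):
    // each tree keeps its children so a later pass can walk them again.
    #include <cstdio>
    #include <vector>

    struct InlineTreeSketch {
      InlineTreeSketch*              caller;
      std::vector<InlineTreeSketch*> subtrees;  // starts empty, grows per inlined site

      explicit InlineTreeSketch(InlineTreeSketch* c) : caller(c) {
        subtrees.reserve(2);  // mirrors the initial capacity of 2
      }
    };

    int main() {
      InlineTreeSketch root(nullptr);
      InlineTreeSketch callee(&root);
      root.subtrees.push_back(&callee);
      std::printf("subtrees: %zu\n", root.subtrees.size());
      return 0;
    }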
@@ -207,21 +208,23 @@
   if (!callee_method->holder()->is_initialized())           return "method holder not initialized";
   if ( callee_method->is_native())                          return "native method";
   if ( callee_method->dont_inline())                        return "don't inline by annotation";
   if ( callee_method->has_unloaded_classes_in_signature())  return "unloaded signature classes";
 
-  if (callee_method->force_inline() || callee_method->should_inline()) {
+  if (callee_method->should_inline()) {
     // ignore heuristic controls on inlining
     return NULL;
   }
 
   // Now perform checks which are heuristic
 
-  if (callee_method->has_compiled_code() &&
-      callee_method->instructions_size() > InlineSmallCode) {
-    return "already compiled into a big method";
+  if (!callee_method->force_inline()) {
+    if (callee_method->has_compiled_code() &&
+        callee_method->instructions_size() > InlineSmallCode) {
+      return "already compiled into a big method";
+    }
   }
 
   // don't inline exception code unless the top method belongs to an
   // exception class
   if (caller_tree() != NULL &&
       callee_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {

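Note the split in the hunk above: should_inline() (the explicit CompileCommand inline directive) still bypasses every heuristic, while force_inline() no longer does; it now only exempts the callee from the InlineSmallCode rejection and remains subject to the budget checks below, which turn into deferrals for it. A condensed model of the reordering, with illustrative names:

    // Condensed model of the reordered checks; names are illustrative.
    #include <cstdio>

    struct Callee {
      bool should_inline;      // explicit "inline" directive
      bool force_inline;       // e.g. @ForceInline on method-handle code
      bool has_compiled_code;
      int  instructions_size;
    };

    const char* big_method_check(const Callee& m, int inline_small_code) {
      if (m.should_inline) return NULL;  // skips every heuristic
      if (!m.force_inline) {             // force_inline skips only this check
        if (m.has_compiled_code && m.instructions_size > inline_small_code) {
          return "already compiled into a big method";
        }
      }
      return NULL;                       // falls through to later heuristics
    }

    int main() {
      Callee mh = { false, true, true, 4000 };
      std::printf("%s\n", big_method_check(mh, 2000) ? "rejected" : "ok");
      return 0;
    }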
@@ -275,16 +278,19 @@
 }
 
 //-----------------------------try_to_inline-----------------------------------
 // return NULL if ok, reason for not inlining otherwise
 // Relocated from "InliningClosure::try_to_inline"
-const char* InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) {
-
+const char* InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result, bool& should_delay) {
   // Old algorithm had funny accumulating BC-size counters
   if (UseOldInlining && ClipInlining
       && (int)count_inline_bcs() >= DesiredMethodLimit) {
-    return "size > DesiredMethodLimit";
+    if (!callee_method->force_inline() || !IncrementalInline) {
+      return "size > DesiredMethodLimit";
+    } else if (!C->inlining_incrementally()) {
+      should_delay = true;
+    }
   }
 
   const char *msg = NULL;
   msg = should_inline(callee_method, caller_method, caller_bci, profile, wci_result);
   if (msg != NULL)

@@ -301,12 +307,17 @@
 
   // suppress a few checks for accessors and trivial methods
   if (callee_method->code_size() > MaxTrivialSize) {
 
     // don't inline into giant methods
-    if (C->unique() > (uint)NodeCountInliningCutoff) {
-      return "NodeCountInliningCutoff";
+    if (C->over_inlining_cutoff()) {
+      if ((!callee_method->force_inline() && !caller_method->is_compiled_lambda_form())
+          || !IncrementalInline) {
+        return "NodeCountInliningCutoff";
+      } else {
+        should_delay = true;
+      }
     }
 
     if ((!UseInterpreter || CompileTheWorld) &&
         is_init_with_ea(callee_method, caller_method, C)) {
 

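C->over_inlining_cutoff() replaces the raw C->unique() comparison, matching the summary line: the post-parse pass is budgeted on live nodes rather than on every node ever created, since dead nodes are cleaned up between rounds. Call sites inside compiled lambda forms get the same deferral as force_inline callees, which keeps JSR 292 adapter chains inlinable. A sketch of the cutoff under that assumption (the real accessor lives in Compile, outside this file, and may differ):

    // Sketch of a live-node-based cutoff; the VM's actual accessor may differ.
    #include <cstdio>

    struct CompileState {
      unsigned unique_nodes;            // every node ever created
      unsigned live_nodes;              // nodes still reachable after cleanup
      bool     inlining_incrementally;  // post-parse incremental pass active
    };

    bool over_inlining_cutoff(const CompileState& c,
                              unsigned node_cutoff,       // NodeCountInliningCutoff
                              unsigned live_node_cutoff)  // assumed separate budget
    {
      return c.inlining_incrementally ? c.live_nodes   > live_node_cutoff
                                      : c.unique_nodes > node_cutoff;
    }

    int main() {
      CompileState c = { 90000, 30000, true };
      std::printf("over cutoff: %d\n", over_inlining_cutoff(c, 75000, 40000) ? 1 : 0);
      return 0;
    }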
@@ -320,12 +331,17 @@
   }
 
   if (!C->do_inlining() && InlineAccessors) {
     return "not an accessor";
   }
+
   if (inline_level() > _max_inline_level) {
-    return "inlining too deep";
+    if (!callee_method->force_inline() || !IncrementalInline) {
+      return "inlining too deep";
+    } else if (!C->inlining_incrementally()) {
+      should_delay = true;
+    }
   }
 
   // detect direct and indirect recursive inlining
   if (!callee_method->is_compiled_lambda_form()) {
     // count the current method and the callee

@@ -346,11 +362,15 @@
 
   int size = callee_method->code_size_for_inlining();
 
   if (UseOldInlining && ClipInlining
       && (int)count_inline_bcs() + size >= DesiredMethodLimit) {
-    return "size > DesiredMethodLimit";
+    if (!callee_method->force_inline() || !IncrementalInline) {
+      return "size > DesiredMethodLimit";
+    } else if (!C->inlining_incrementally()) {
+      should_delay = true;
+    }
   }
 
   // ok, inline this method
   return NULL;
 }

@@ -411,12 +431,13 @@
     //tty->print("  bcs: %d+%d  invoked: %d", top->count_inline_bcs(), callee_method->code_size(), callee_method->interpreter_invocation_count());
   }
 }
 
 //------------------------------ok_to_inline-----------------------------------
-WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallProfile& profile, WarmCallInfo* initial_wci) {
+WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallProfile& profile, WarmCallInfo* initial_wci, bool& should_delay) {
   assert(callee_method != NULL, "caller checks for optimized virtual!");
+  assert(!should_delay, "should be initialized to false");
 #ifdef ASSERT
   // Make sure the incoming jvms has the same information content as me.
   // This means that we can eventually make this whole class AllStatic.
   if (jvms->caller() == NULL) {
     assert(_caller_jvms == NULL, "redundant instance state");

@@ -442,11 +463,11 @@
     return NULL;
   }
 
   // Check if inlining policy says no.
   WarmCallInfo wci = *(initial_wci);
-  failure_msg = try_to_inline(callee_method, caller_method, caller_bci, profile, &wci);
+  failure_msg = try_to_inline(callee_method, caller_method, caller_bci, profile, &wci, should_delay);
   if (failure_msg != NULL && C->log() != NULL) {
     C->log()->inline_fail(failure_msg);
   }
 
 #ifndef PRODUCT
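
Finally, ok_to_inline() plumbs should_delay through to try_to_inline(), and the new assert pins down the calling convention: the caller passes the flag in as false, and a non-rejected result with should_delay set means "queue this site for incremental inlining" rather than "inline during parsing". A hypothetical caller, with stand-in types (the real call sites are in C2's call-generator code, outside this diff):

    // Hypothetical caller illustrating the out-parameter contract; the real
    // call sites live in C2's call-generator code, not in this diff.
    #include <cassert>
    #include <cstdio>
    #include <vector>

    struct CallSite { const char* name; };

    bool ok_to_inline_sketch(const CallSite&, bool& should_delay) {
      assert(!should_delay && "should be initialized to false");
      should_delay = true;  // pretend a budget check asked for deferral
      return true;          // inlining is still considered profitable
    }

    int main() {
      std::vector<CallSite> deferred;                // worklist for the post-parse pass
      CallSite site = { "LambdaForm$MH/invoke" };    // illustrative callee name

      bool should_delay = false;                     // caller resets this per call site
      if (ok_to_inline_sketch(site, should_delay)) {
        if (should_delay) {
          deferred.push_back(site);                  // inline later, incrementally
        } else {
          // inline during parsing
        }
      }
      std::printf("deferred sites: %zu\n", deferred.size());
      return 0;
    }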