< prev index next >

src/share/vm/oops/instanceKlass.cpp

Print this page

        

@@ -1811,23 +1811,17 @@
     id = jmeths[idnum+1];                       // Look up the id (may be NULL)
   }
   return id;
 }
 
-int nmethodBucket::decrement() {
+// Atomically drop this entry's reference count by one; returns the
+// updated count (Atomic::add returns the new value).
+int nmethodBucketEntry::decrement() {
   return Atomic::add(-1, (volatile int *)&_count);
 }
 
-//
-// Walk the list of dependent nmethods searching for nmethods which
-// are dependent on the changes that were passed in and mark them for
-// deoptimization.  Returns the number of nmethods found.
-//
-int nmethodBucket::mark_dependent_nmethods(nmethodBucket* deps, DepChange& changes) {
-  assert_locked_or_safepoint(CodeCache_lock);
-  int found = 0;
-  for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
+int nmethodBucketEntry::mark_dependent_nmethods(nmethodBucketEntry* deps, DepChange& changes) {
+  int found = 0, total = 0;
+  for (nmethodBucketEntry* b = deps; b != NULL; b = b->next(), total++) {
     nmethod* nm = b->get_nmethod();
     // since dependencies aren't removed until an nmethod becomes a zombie,
     // the dependency list may contain nmethods which aren't alive.
     if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
       if (TraceDependencies) {

@@ -1839,65 +1833,46 @@
       }
       nm->mark_for_deoptimization();
       found++;
     }
   }
+  Dependencies::_perf_dependencies_checked_count->inc(total);
   return found;
 }
 
-//
-// Add an nmethodBucket to the list of dependencies for this nmethod.
-// It's possible that an nmethod has multiple dependencies on this klass
-// so a count is kept for each bucket to guarantee that creation and
-// deletion of dependencies is consistent. Returns new head of the list.
-//
-nmethodBucket* nmethodBucket::add_dependent_nmethod(nmethodBucket* deps, nmethod* nm) {
+//
+// Add an entry for nm to this entry list, or bump its count when one
+// already exists. An nmethod can depend on the same context several
+// times, so the per-entry count keeps creation and deletion of
+// dependencies consistent. Returns the (possibly new) head of the list.
+//
+nmethodBucketEntry* nmethodBucketEntry::add_dependent_nmethod(nmethodBucketEntry* deps, nmethod* nm) {
   assert_locked_or_safepoint(CodeCache_lock);
-  for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
+  for (nmethodBucketEntry* b = deps; b != NULL; b = b->next()) {
     if (nm == b->get_nmethod()) {
       b->increment();
       return deps;
     }
   }
-  return new nmethodBucket(nm, deps);
+  return new nmethodBucketEntry(nm, deps);
 }
 
-//
-// Decrement count of the nmethod in the dependency list and remove
-// the bucket completely when the count goes to 0.  This method must
-// find a corresponding bucket otherwise there's a bug in the
-// recording of dependencies. Returns true if the bucket is ready for reclamation.
-//
-bool nmethodBucket::remove_dependent_nmethod(nmethodBucket* deps, nmethod* nm) {
+//
+// Decrement the count of nm's entry in this list, setting 'found' when
+// an entry for nm exists. Returns true when the count reached zero,
+// i.e. the entry is ready to be reclaimed by clean_dependent_nmethods().
+// The missing-entry error check now lives in the caller
+// (nmethodBucket::remove_dependent_nmethod), which scans all buckets.
+//
+bool nmethodBucketEntry::remove_dependent_nmethod(nmethodBucketEntry* deps, nmethod* nm, bool& found) {
   assert_locked_or_safepoint(CodeCache_lock);
-
-  for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
+  for (nmethodBucketEntry* b = deps; b != NULL; b = b->next()) {
     if (nm == b->get_nmethod()) {
       int val = b->decrement();
       guarantee(val >= 0, err_msg("Underflow: %d", val));
+      found = true;
       return (val == 0);
     }
   }
-#ifdef ASSERT
-  tty->print_raw_cr("### can't find dependent nmethod");
-  nm->print();
-#endif // ASSERT
-  ShouldNotReachHere();
   return false;
 }
 
-//
-// Reclaim all unused buckets. Returns new head of the list.
-//
-nmethodBucket* nmethodBucket::clean_dependent_nmethods(nmethodBucket* deps) {
-  nmethodBucket* first = deps;
-  nmethodBucket* last = NULL;
-  nmethodBucket* b = first;
+nmethodBucketEntry* nmethodBucketEntry::clean_dependent_nmethods(nmethodBucketEntry* deps) {
+  nmethodBucketEntry* first = deps;
+  nmethodBucketEntry* last = NULL;
+  nmethodBucketEntry* b = first;
 
   while (b != NULL) {
     assert(b->count() >= 0, err_msg("bucket count: %d", b->count()));
-    nmethodBucket* next = b->next();
+    nmethodBucketEntry* next = b->next();
     if (b->count() == 0) {
       if (last == NULL) {
         first = next;
       } else {
         last->set_next(next);

@@ -1911,13 +1886,20 @@
   }
   return first;
 }
 
 #ifndef PRODUCT
-void nmethodBucket::print_dependent_nmethods(nmethodBucket* deps, bool verbose) {
+// Debug-only consistency check for one entry list: counts must be
+// non-negative, and zero-count entries must already have been reclaimed
+// by clean_dependent_nmethods().
+void nmethodBucketEntry::verify(nmethodBucketEntry* deps) {
+  for (nmethodBucketEntry* b = deps; b != NULL; b = b->next()) {
+    assert(b->count() >= 0, err_msg("bucket count: %d", b->count()));
+    assert(b->count() != 0, "empty buckets need to be cleaned");
+  }
+}
+
+void nmethodBucketEntry::print_dependent_nmethods(nmethodBucketEntry* deps, bool verbose) {
   int idx = 0;
-  for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
+  for (nmethodBucketEntry* b = deps; b != NULL; b = b->next()) {
     nmethod* nm = b->get_nmethod();
     tty->print("[%d] count=%d { ", idx++, b->count());
     if (!verbose) {
       nm->print_on(tty, "nmethod");
       tty->print_cr(" } ");

@@ -1927,12 +1909,12 @@
       tty->print_cr("--- } ");
     }
   }
 }
 
-bool nmethodBucket::is_dependent_nmethod(nmethodBucket* deps, nmethod* nm) {
-  for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
+bool nmethodBucketEntry::is_dependent_nmethod(nmethodBucketEntry* deps, nmethod* nm) {
+  for (nmethodBucketEntry* b = deps; b != NULL; b = b->next()) {
     if (nm == b->get_nmethod()) {
 #ifdef ASSERT
       int count = b->count();
       assert(count >= 0, err_msg("count shouldn't be negative: %d", count));
 #endif

@@ -1941,10 +1923,153 @@
   }
   return false;
 }
 #endif //PRODUCT
 
+
+// Map a dependency change kind to the index of the bucket holding the
+// nmethods that can be affected by it. Exactly one bucket matches any
+// given change.
+int nmethodBucket::bucket_index(DepChange& changes) {
+  if (changes.is_klass_change())               return KlassBucket;
+  else if (changes.is_call_site_change())      return CallSiteBucket;
+  else if (changes.is_constant_field_change()) return ConstantFieldBucket;
+  else {
+    ShouldNotReachHere();
+    return -1;
+  }
+}
+
+//
+// Walk the list of dependent nmethods searching for nmethods which
+// are dependent on the changes that were passed in and mark them for
+// deoptimization.  Returns the number of nmethods found.
+// Only the single bucket selected by bucket_index(changes) is walked,
+// so dependencies that cannot be affected by 'changes' are never scanned.
+//
+int nmethodBucket::mark_dependent_nmethods(nmethodBucket* deps, DepChange& changes) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  PerfTraceTime pt(Dependencies::_perf_dependency_checking_time);
+  int found = 0;
+  if (deps != NULL) {
+    int idx = bucket_index(changes);
+    nmethodBucketEntry* b = deps->_buckets[idx];
+    found = nmethodBucketEntry::mark_dependent_nmethods(b, changes);
+    // NOTE(review): the perf counters are dereferenced unconditionally
+    // here — confirm they are allocated even when perf data is disabled.
+    Dependencies::_perf_dependencies_context_traversals->inc(1);
+    Dependencies::_perf_dependencies_invalidated->inc(found);
+  }
+  return found;
+}
+
+//
+// Add an nmethodBucket to the list of dependencies for this nmethod.
+// It's possible that an nmethod has multiple dependencies on this klass
+// so a count is kept for each bucket to guarantee that creation and
+// deletion of dependencies is consistent. Returns new head of the list.
+//
+nmethodBucket* nmethodBucket::add_dependent_nmethod(nmethodBucket* b, nmethod* nm) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  if (b == NULL) {
+    b = new nmethodBucket();
+  }
+  // Phase 1: classify each of nm's recorded dependencies to decide
+  // which buckets need an entry for it.
+  bool has_deps[Bucket_LIMIT];
+  for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) { has_deps[i] = false; }
+  for (Dependencies::DepStream deps(nm); deps.next(); ) {
+    switch(deps.type()) {
+      case Dependencies::call_site_target_value:        has_deps[CallSiteBucket] = true;      break;
+      case Dependencies::constant_field_value_instance: // fallthru
+      case Dependencies::constant_field_value_klass:    has_deps[ConstantFieldBucket] = true; break;
+      default:                                          has_deps[KlassBucket] = true;         break;
+    }
+  }
+  // Phase 2: register nm once per bucket that has at least one
+  // matching dependency (per-bucket entry counts handle duplicates).
+  for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) {
+    if (has_deps[i]) {
+      b->_buckets[i] = nmethodBucketEntry::add_dependent_nmethod(b->_buckets[i], nm);
+    }
+  }
+  return b;
+}
+
+//
+// Decrement count of the nmethod in the dependency list and remove
+// the bucket completely when the count goes to 0.  This method must
+// find a corresponding bucket otherwise there's a bug in the
+// recording of dependencies. Returns true if the bucket is ready for reclamation.
+//
+bool nmethodBucket::remove_dependent_nmethod(nmethodBucket* deps, nmethod* nm) {
+  if (deps != NULL) {
+    assert_locked_or_safepoint(CodeCache_lock);
+    bool found = false, removed = false;
+    // nm may have entries in several buckets; decrement in each of them.
+    for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) {
+      bool r = nmethodBucketEntry::remove_dependent_nmethod(deps->_buckets[i], nm, found);
+      removed = removed || r;
+    }
+    if (found) {
+      return removed;
+    }
+  }
+  // No entry for nm in any bucket: dependency bookkeeping is broken.
+#ifdef ASSERT
+  tty->print_raw_cr("### can't find dependent nmethod");
+  nm->print();
+#endif // ASSERT
+  ShouldNotReachHere();
+  return false;
+}
+
+//
+// Reclaim all unused buckets. Returns new head of the list.
+//
+nmethodBucket* nmethodBucket::clean_dependent_nmethods(nmethodBucket* deps) {
+  if (deps == NULL)  return NULL;
+  // Drop zero-count entries from every bucket's list.
+  for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) {
+    deps->_buckets[i] = nmethodBucketEntry::clean_dependent_nmethods(deps->_buckets[i]);
+  }
+  return deps;
+}
+
+// Tear down a whole dependency context: mark any still-live dependent
+// nmethods for deoptimization, free every entry in every bucket, and
+// free the bucket object itself. Returns the number of nmethods marked.
+int nmethodBucket::release(nmethodBucket* deps) {
+  if (deps == NULL)  return 0;
+  int marked = 0;
+  for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) {
+    nmethodBucketEntry* entry = deps->_buckets[i];
+    while (entry != NULL) {
+      nmethod* nm = entry->get_nmethod();
+      // Entries may reference dead nmethods; only live, not-yet-marked
+      // ones need deoptimization.
+      if (entry->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization()) {
+        nm->mark_for_deoptimization();
+        marked++;
+      }
+      nmethodBucketEntry* next = entry->next();
+      delete entry;
+      entry = next;
+    }
+  }
+  delete deps;
+  return marked;
+}
+
+#ifndef PRODUCT
+// Debug-only: verify entry-list invariants in every bucket.
+void nmethodBucket::verify(nmethodBucket* deps) {
+  if (deps == NULL)  return;
+  for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) {
+    nmethodBucketEntry::verify(deps->_buckets[i]);
+  }
+}
+
+// Debug-only: dump the dependent nmethods of this context, one section
+// per bucket index.
+void nmethodBucket::print_dependent_nmethods(nmethodBucket* deps, bool verbose) {
+  if (deps == NULL)  return;
+  for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) {
+    tty->print_cr("Bucket #%d: ", i);
+    nmethodBucketEntry::print_dependent_nmethods(deps->_buckets[i], verbose);
+  }
+}
+
+// Debug-only: true if nm has an entry in any bucket of this context.
+bool nmethodBucket::is_dependent_nmethod(nmethodBucket* deps, nmethod* nm) {
+  if (deps == NULL)  return false;
+  for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) {
+    if (nmethodBucketEntry::is_dependent_nmethod(deps->_buckets[i], nm)) {
+      return true;
+    }
+  }
+  return false;
+}
+#endif //PRODUCT
+
+// Mark this klass's dependent nmethods affected by 'changes' for
+// deoptimization; returns how many were marked.
 int InstanceKlass::mark_dependent_nmethods(DepChange& changes) {
   assert_locked_or_safepoint(CodeCache_lock);
   return nmethodBucket::mark_dependent_nmethods(_dependencies, changes);
 }
 

@@ -1956,14 +2081,11 @@
     set_has_unloaded_dependent(false);
   }
 #ifdef ASSERT
   else {
     // Verification
-    for (nmethodBucket* b = _dependencies; b != NULL; b = b->next()) {
-      assert(b->count() >= 0, err_msg("bucket count: %d", b->count()));
-      assert(b->count() != 0, "empty buckets need to be cleaned");
-    }
+    nmethodBucket::verify(_dependencies);
   }
 #endif
 }
 
 void InstanceKlass::add_dependent_nmethod(nmethod* nm) {

@@ -2160,17 +2282,13 @@
       set_member_names(NULL);
     }
   }
 
   // release dependencies
-  nmethodBucket* b = _dependencies;
+  int marked = nmethodBucket::release(_dependencies);
+  assert(marked == 0, "");
   _dependencies = NULL;
-  while (b != NULL) {
-    nmethodBucket* next = b->next();
-    delete b;
-    b = next;
-  }
 
   // Deallocate breakpoint records
   if (breakpoints() != 0x0) {
     methods_do(clear_all_breakpoints);
     assert(breakpoints() == 0x0, "should have cleared breakpoints");
< prev index next >