
src/share/vm/memory/genCollectedHeap.cpp

rev 7742 : [mq]: old_patch
rev 7743 : imported patch new_fix

@@ -318,11 +318,12 @@
          ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
           (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
 }
 
 void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
-                                          bool is_tlab, bool run_verification, bool clear_soft_refs) {
+                                          bool is_tlab, bool run_verification, bool clear_soft_refs,
+                                          bool restore_marks_for_biased_locking) {
   // Timer for individual generations. Last argument is false: no CR
   // FIXME: We should try to start the timing earlier to cover more of the GC pause
   // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
   // so we can assume here that the next GC id is what we want.
   GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL, GCId::peek());

@@ -349,10 +350,18 @@
     HandleMark hm;  // Discard invalid handles created during verification
     Universe::verify(" VerifyBeforeGC:");
   }
   COMPILER2_PRESENT(DerivedPointerTable::clear());
 
+  if (restore_marks_for_biased_locking) {
+    // We perform this mark word preservation work lazily
+    // because it's only at this point that we know whether we
+    // absolutely have to do it; we want to avoid doing it for
+    // scavenge-only collections where it's unnecessary
+    BiasedLocking::preserve_marks();
+  }
+
   // Do collection work
   {
     // Note on ref discovery: For what appear to be historical reasons,
     // GCH enables and disables (by enqueueing) refs discovery.
     // In the future this should be moved into the generation's

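Note on the new parameter: mark-word preservation for biased locking now happens inside collect_generation(), and only when the collection about to run will do in-place marking (i.e. may overwrite object headers). Below is a minimal standalone sketch of that pattern with BiasedLocking stubbed out; the real calls are BiasedLocking::preserve_marks() and BiasedLocking::restore_marks(), and everything else in the sketch is illustrative, not HotSpot API.

  #include <cstdio>

  // Stub standing in for HotSpot's BiasedLocking; only the call pattern matters.
  struct BiasedLockingStub {
    static void preserve_marks() { std::puts("preserve mark words"); }
    static void restore_marks()  { std::puts("restore mark words"); }
  };

  // Models the new collect_generation() contract: the caller decides whether
  // this particular collection needs the mark words preserved first.
  void collect_generation_model(const char* name, bool restore_marks_for_biased_locking) {
    if (restore_marks_for_biased_locking) {
      // Paid lazily, right before a collection that may overwrite headers.
      BiasedLockingStub::preserve_marks();
    }
    std::printf("collect %s generation\n", name);
  }

  int main() {
    collect_generation_model("young", false);  // scavenge: no preservation cost
    collect_generation_model("old",   true);   // in-place marking: preserve first
    BiasedLockingStub::restore_marks();        // caller restores once at the end
    return 0;
  }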
@@ -387,19 +396,10 @@
       rp->set_enqueuing_is_done(false);
     }
     rp->verify_no_references_recorded();
   }
 
-  // Determine if allocation request was met.
-  if (size > 0) {
-    if (!is_tlab || gen->supports_tlab_allocation()) {
-      if (size * HeapWordSize <= gen->unsafe_max_alloc_nogc()) {
-        size = 0;
-      }
-    }
-  }
-
   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 
   gen->stat_record()->accumulated_time.stop();
 
   update_gc_stats(gen->level(), full);

@@ -460,40 +460,56 @@
 
     size_t gch_prev_used = used();
     bool must_restore_marks_for_biased_locking = false;
     bool run_verification = total_collections() >= VerifyGCStartAt;
 
-    if (_young_gen->performs_in_place_marking() ||
-        _old_gen->performs_in_place_marking()) {
-      // We want to avoid doing this for
-      // scavenge-only collections where it's unnecessary.
-      must_restore_marks_for_biased_locking = true;
-      BiasedLocking::preserve_marks();
-    }
-
     bool prepared_for_verification = false;
     int max_level_collected = 0;
-    if (!(full && _old_gen->full_collects_younger_generations()) &&
+    if (!(max_level == 1 && full && _old_gen->full_collects_younger_generations()) &&
         _young_gen->should_collect(full, size, is_tlab)) {
       if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
         prepare_for_verify();
         prepared_for_verification = true;
       }
-      collect_generation(_young_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 0, do_clear_all_soft_refs);
+      if (_young_gen->performs_in_place_marking()) {
+        must_restore_marks_for_biased_locking = true;
+      }
+      collect_generation(_young_gen,
+                         full,
+                         size,
+                         is_tlab,
+                         run_verification && VerifyGCLevel <= 0,
+                         do_clear_all_soft_refs,
+                         must_restore_marks_for_biased_locking);
+
+      if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
+          size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
+        // Allocation request was met by young GC.
+        size = 0;
     }
+    }
+
     if (max_level == 1 && _old_gen->should_collect(full, size, is_tlab)) {
       if (!complete) {
         // The full_collections increment was missed above.
         increment_total_full_collections();
       }
       pre_full_gc_dump(NULL);    // do any pre full gc dumps
-      if (run_verification && VerifyGCLevel <= 1 && VerifyBeforeGC) {
-        if (!prepared_for_verification) {
+      if (!prepared_for_verification && run_verification &&
+          VerifyGCLevel <= 1 && VerifyBeforeGC) {
           prepare_for_verify();
         }
-      }
-      collect_generation(_old_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 1, do_clear_all_soft_refs);
+      assert(_old_gen->performs_in_place_marking(), "All old generations do in place marking");
+      collect_generation(_old_gen,
+                         full,
+                         size,
+                         is_tlab,
+                         run_verification && VerifyGCLevel <= 1,
+                         do_clear_all_soft_refs,
+                         !must_restore_marks_for_biased_locking);
+
+      must_restore_marks_for_biased_locking = true;
       max_level_collected = 1;
     }
 
     // Update "complete" boolean wrt what actually transpired --
     // for instance, a promotion failure could have led to

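Taken together, the caller side of this hunk now decides per generation whether marks must be preserved: the young pass preserves only if the young generation marks in place, the old pass preserves only if the young pass has not already done so (old generations always mark in place, per the assert), and the allocation request is treated as satisfied once the young collection has freed enough space. The sketch below models that orchestration; it assumes, as in the do_collection() code outside this hunk, that BiasedLocking::restore_marks() runs once afterwards when the flag ended up set. All names are illustrative, and the decision to also collect the old generation is simplified to a plain boolean in place of should_collect().

  #include <cstddef>
  #include <cstdio>

  struct GenModel {
    bool in_place_marking;
    size_t free_bytes;
    bool performs_in_place_marking() const { return in_place_marking; }
    size_t unsafe_max_alloc_nogc() const { return free_bytes; }
  };

  static void collect(const char* name, bool preserve_marks) {
    if (preserve_marks) std::puts("  preserve marks");
    std::printf("  collect %s\n", name);
  }

  int main() {
    const size_t HeapWordSize = 8;          // stand-in for the HotSpot constant
    GenModel young{false, 64 * 1024};       // scavenging young generation
    GenModel old_gen{true, 0};              // old generation marks in place
    size_t size = 1024;                     // words requested by the failed allocation
    bool collect_old = true;                // stands in for should_collect() etc.

    bool must_restore_marks_for_biased_locking = false;

    // Young collection: preserve marks only if it marks in place.
    if (young.performs_in_place_marking()) {
      must_restore_marks_for_biased_locking = true;
    }
    collect("young", must_restore_marks_for_biased_locking);
    if (size > 0 && size * HeapWordSize <= young.unsafe_max_alloc_nogc()) {
      size = 0;                             // allocation request met by the young GC
    }

    // Old collection: always marks in place, so preserve unless already done.
    if (collect_old) {
      collect("old", !must_restore_marks_for_biased_locking);
      must_restore_marks_for_biased_locking = true;
    }

    if (must_restore_marks_for_biased_locking) {
      std::puts("restore marks");           // happens later in do_collection()
    }
    std::printf("unmet request: %zu words\n", size);
    return 0;
  }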
@@ -647,15 +663,14 @@
 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
 
 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
 
 bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
-  if (level == 0) {
-    if (!_young_gen->no_allocs_since_save_marks()) return false;
+  if (level == 0 && !_young_gen->no_allocs_since_save_marks()) {
+    return false;
   }
-  if (!_old_gen->no_allocs_since_save_marks()) return false;
-  return true;
+  return _old_gen->no_allocs_since_save_marks();
 }
 
 bool GenCollectedHeap::supports_inline_contig_alloc() const {
   return _young_gen->supports_inline_contig_alloc();
 }
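For reference, the rewritten no_allocs_since_save_marks() is behavior-preserving: level 0 consults both generations, any other level only the old one. A small standalone check of that equivalence (the two functions below are illustrative, not part of the patch):

  #include <cassert>

  // Old and new forms of the check, with the generation queries reduced to
  // plain booleans for the comparison.
  static bool old_form(int level, bool young_ok, bool old_ok) {
    if (level == 0) {
      if (!young_ok) return false;
    }
    if (!old_ok) return false;
    return true;
  }

  static bool new_form(int level, bool young_ok, bool old_ok) {
    if (level == 0 && !young_ok) {
      return false;
    }
    return old_ok;
  }

  int main() {
    for (int level = 0; level <= 1; ++level) {
      for (int young_ok = 0; young_ok <= 1; ++young_ok) {
        for (int old_ok = 0; old_ok <= 1; ++old_ok) {
          assert(old_form(level, young_ok, old_ok) == new_form(level, young_ok, old_ok));
        }
      }
    }
    return 0;
  }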