hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp

rev 611 : Merge

@@ -1,10 +1,10 @@
 #ifdef USE_PRAGMA_IDENT_SRC
 #pragma ident "@(#)psOldGen.cpp 1.54 07/05/05 17:05:28 JVM"
 #endif
 /*
- * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.

@@ -88,10 +88,19 @@
   // Card table stuff
   //
 
   MemRegion cmr((HeapWord*)virtual_space()->low(),
                 (HeapWord*)virtual_space()->high());
+  if (ZapUnusedHeapArea) {
+    // Mangle newly committed space immediately rather than
+    // waiting for the initialization of the space, even though
+    // mangling is logically a per-space operation.  Doing it here
+    // eliminates the need to carry along information that a complete
+    // mangling (bottom to end) still needs to be done.
+    SpaceMangler::mangle_region(cmr);
+  }
+
   Universe::heap()->barrier_set()->resize_covered_region(cmr);
 
   CardTableModRefBS* _ct = (CardTableModRefBS*)Universe::heap()->barrier_set();
   assert (_ct->kind() == BarrierSet::CardTableModRef, "Sanity");
 

@@ -113,11 +122,13 @@
   _object_space = new MutableSpace();
   
   if (_object_space == NULL)
     vm_exit_during_initialization("Could not allocate an old gen space");
 
-  object_space()->initialize(cmr, true);
+  object_space()->initialize(cmr,
+                             SpaceDecorator::Clear,
+                             SpaceDecorator::Mangle);
 
   _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);
 
   if (_object_mark_sweep == NULL)
     vm_exit_during_initialization("Could not complete allocation of old generation");
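
The named SpaceDecorator flags replace the old raw boolean so the call site
documents itself. A sketch of the constants the companion spaceDecorator.hpp
plausibly defines (values assumed):

    class SpaceDecorator: public AllStatic {
     public:
      // Named stand-ins for raw booleans:
      //   initialize(cmr, SpaceDecorator::Clear, SpaceDecorator::Mangle)
      // reads unambiguously, unlike initialize(cmr, true).
      static const bool Clear      = true;
      static const bool DontClear  = false;
      static const bool Mangle     = true;
      static const bool DontMangle = false;
    };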

@@ -142,13 +153,11 @@
 void PSOldGen::precompact() {
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 
   // Reset start array first.
-  debug_only(if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {)
   start_array()->reset();
-  debug_only(})
 
   object_mark_sweep()->precompact();
 
   // Now compact the young gen
   heap->young_gen()->precompact();
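
For readers unfamiliar with the idiom: debug_only(code) is the HotSpot macro
that emits its argument only in debug (ASSERT) builds, roughly:

    #ifdef ASSERT
    #define debug_only(code) code
    #else
    #define debug_only(code)
    #endif

So the deleted lines had made the start-array reset conditional in debug
builds; after this change it happens unconditionally.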

@@ -205,14 +214,26 @@
   }
   return cas_allocate_noexpand(word_size);
 }
 
 void PSOldGen::expand(size_t bytes) {
+  if (bytes == 0) {
+    return;
+  }
   MutexLocker x(ExpandHeap_lock);
   const size_t alignment = virtual_space()->alignment();
   size_t aligned_bytes  = align_size_up(bytes, alignment);
   size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment);
+  if (aligned_bytes == 0) {
+    // The alignment caused the number of bytes to wrap.  An expand_by(0)
+    // will return true with the implication that an expansion was done when
+    // it was not.  A call to expand implies a best effort to expand by
+    // "bytes" but not a guarantee.  Align down to give a best effort.  This
+    // is likely the most that the generation can expand, since it already
+    // has some capacity to start with.
+    aligned_bytes = align_size_down(bytes, alignment);
+  }
 
   bool success = false;
   if (aligned_expand_bytes > aligned_bytes) {
     success = expand_by(aligned_expand_bytes);
   }
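
The wrap described in the new comment is ordinary unsigned overflow in a
power-of-two round-up. A standalone illustration (align_up/align_down are
simplified stand-ins for HotSpot's align_size_up/align_size_down, assuming a
power-of-two alignment):

    #include <cassert>
    #include <cstddef>
    using std::size_t;

    static size_t align_up(size_t n, size_t a)   { return (n + a - 1) & ~(a - 1); }
    static size_t align_down(size_t n, size_t a) { return n & ~(a - 1); }

    int main() {
      const size_t alignment = 64 * 1024;        // e.g. a 64K commit granule
      const size_t bytes = (size_t)-1 - 1000;    // pathologically large request
      assert(align_up(bytes, alignment) == 0);   // round-up wrapped to zero
      const size_t best_effort = align_down(bytes, alignment);
      assert(best_effort != 0 && best_effort <= bytes);  // usable fallback
      return 0;
    }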

@@ -221,22 +242,38 @@
   }
   if (!success) {
     success = expand_to_reserved();
   }
 
-  if (GC_locker::is_active()) {
-    if (PrintGC && Verbose) {
+  if (PrintGC && Verbose) {
+    if (success && GC_locker::is_active()) {
       gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
     }
   }
 }
 
 bool PSOldGen::expand_by(size_t bytes) {
   assert_lock_strong(ExpandHeap_lock);
   assert_locked_or_safepoint(Heap_lock);
+  if (bytes == 0) {
+    return true;  // That's what virtual_space()->expand_by(0) would return
+  }
   bool result = virtual_space()->expand_by(bytes);
   if (result) {
+    if (ZapUnusedHeapArea) {
+      // We need to mangle the newly expanded area.  The memregion spans
+      // end -> new_end; we assume that top -> end is already mangled.
+      // Do the mangling before post_resize() is called, because
+      // the space is available for allocation after post_resize().
+      HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
+      assert(object_space()->end() < virtual_space_high,
+        "Should be true before post_resize()");
+      MemRegion mangle_region(object_space()->end(), virtual_space_high);
+      // Note that the object space has not yet been updated to
+      // coincide with the new underlying virtual space.
+      SpaceMangler::mangle_region(mangle_region);
+    }
     post_resize();
     if (UsePerfData) {
       _space_counters->update_capacity();
       _gen_counters->update_all();
     }
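
Taken together, the ordering the new expand_by code relies on looks like the
following sketch (simplified, not a verbatim copy of the method): the tail
between the space's current end() and the enlarged virtual space is mangled
while it is still unreachable to allocators, and only then does post_resize()
publish it.

    // Simplified shape of PSOldGen::expand_by() after this change.
    bool expand_by_sketch(PSOldGen* gen, size_t bytes) {
      if (bytes == 0) return true;                 // matches expand_by(0)
      if (!gen->virtual_space()->expand_by(bytes)) return false;
      HeapWord* old_end  = gen->object_space()->end();   // still the old limit
      HeapWord* new_high = (HeapWord*) gen->virtual_space()->high();
      if (ZapUnusedHeapArea) {
        // Safe: nothing can allocate in [old_end, new_high) until
        // post_resize() moves the space's end.
        SpaceMangler::mangle_region(MemRegion(old_end, new_high));
      }
      gen->post_resize();                          // now end() == new_high
      return true;
    }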

@@ -349,20 +386,11 @@
   size_t new_word_size = new_memregion.word_size();
 
   start_array()->set_covered_region(new_memregion);
   Universe::heap()->barrier_set()->resize_covered_region(new_memregion);
 
-  // Did we expand?
   HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
-  if (object_space()->end() < virtual_space_high) {
-    // We need to mangle the newly expanded area. The memregion spans
-    // end -> new_end, we assume that top -> end is already mangled.
-    // This cannot be safely tested for, as allocation may be taking
-    // place.
-    MemRegion mangle_region(object_space()->end(), virtual_space_high);
-    object_space()->mangle_region(mangle_region); 
-  }
 
   // ALWAYS do this last!!
   object_space()->set_end(virtual_space_high);
 
   assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
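
The mangling block deleted here moves into expand_by() above, where the
deleted comment's concern does not apply: at post_resize() time allocation may
already be racing with the resize, so the top -> end invariant cannot be
safely tested. What remains is the publish-last discipline, sketched below
(types simplified, not the actual MutableSpace code): every covering structure
is resized before set_end() makes the new range allocatable.

    struct MutableSpaceSketch {
      HeapWord* volatile _end;         // CAS allocation bounds itself by _end
      void resize(HeapWord* new_end) {
        // 1. The start array and card table are resized to cover the range.
        // 2. Only then is _end published, making the range allocatable.
        _end = new_end;                // ALWAYS do this last
      }
    };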

@@ -463,5 +491,12 @@
 
 void PSOldGen::verify_object_start_array() {
   VerifyObjectStartArrayClosure check( this, &_start_array );
   object_iterate(&check);
 }
+
+#ifndef PRODUCT
+void PSOldGen::record_spaces_top() {
+  assert(ZapUnusedHeapArea, "Not mangling unused space");
+  object_space()->set_top_for_allocations();
+}
+#endif
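
record_spaces_top() feeds the mangling bookkeeping: it snapshots the space's
current top so debug code can later verify that everything above that mark is
still mangled. A sketch of the per-space state this plausibly drives
(simplified from the SpaceMangler pattern, not verbatim):

    class SpaceManglerSketch {
      HeapWord* _top_for_allocations;  // high-water mark of real allocation
     public:
      void set_top_for_allocations(HeapWord* v) { _top_for_allocations = v; }
      // Debug check: words in [_top_for_allocations, limit) must still carry
      // the mangle pattern, i.e. nothing was allocated behind our back.
      void check_mangled_unused_area(HeapWord* limit);
    };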