
src/share/vm/gc/g1/g1CollectedHeap.cpp

rev 8824 : [mq]: rev1
rev 8825 : [mq]: rev2

@@ -1126,19 +1126,19 @@
     dirty_young_block(result, word_size);
   }
   return result;
 }
 
-void G1CollectedHeap::free_archive_regions(MemRegion* ranges, size_t count) {
+void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
   assert(!is_init_completed(), "Expect to be called at JVM init time");
   assert(ranges != NULL, "MemRegion array NULL");
   assert(count != 0, "No MemRegions provided");
   MemRegion reserved = _hrm.reserved();
   HeapWord* prev_last_addr = NULL;
   HeapRegion* prev_last_region = NULL;
-  FreeRegionList local_free_list("Local List for Freeing Archive Regions");
   size_t size_used = 0;
+  size_t uncommitted_regions = 0;
 
   // For each MemRegion, free the G1 regions that constitute it, and
   // notify mark-sweep that the range is no longer to be considered 'archive.'
   MutexLockerEx x(Heap_lock);
   for (size_t i = 0; i < count; i++) {

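
Reviewer note, not part of the patch: the loop in the next hunk visits every heap region covered by each MemRegion. A minimal standalone sketch of that start/last region arithmetic, assuming a hypothetical 1 MB region size and plain byte offsets instead of HeapWord* addresses:

#include <cstdio>
#include <cstddef>

int main() {
  const size_t region_size = 1024 * 1024;       // hypothetical 1 MB G1 region
  size_t range_start = 3 * region_size + 4096;  // arbitrary archive range start
  size_t range_last  = 6 * region_size + 512;   // last byte of the range

  // dealloc_archive_regions walks exactly the regions containing
  // range_start .. range_last, i.e. indices 3 through 6 here.
  size_t first_index = range_start / region_size;
  size_t last_index  = range_last / region_size;
  printf("regions %zu..%zu cover the range\n", first_index, last_index);
  return 0;
}
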
@@ -1169,34 +1169,42 @@
       start_region = _hrm.addr_to_region(start_address);
     }
     prev_last_region = last_region;
 
     // After verifying that each region was marked as an archive region by
-    // alloc_archive_regions, free it.
+    // alloc_archive_regions, mark it free and empty, and uncommit it.
     HeapRegion* curr_region = start_region;
     while (curr_region != NULL) {
       guarantee(curr_region->is_archive(),
                 err_msg("Expected archive region at index %u", curr_region->hrm_index()));
-
+      uint curr_index = curr_region->hrm_index();
       _old_set.remove(curr_region);
-      free_region(curr_region, &local_free_list, false /* par */, true /* locked */);
+      curr_region->set_free();
+      curr_region->set_top(curr_region->bottom());
       if (curr_region != last_region) {
         curr_region = _hrm.next_region_in_heap(curr_region);
       } else {
         curr_region = NULL;
       }
+      _hrm.shrink_at(curr_index);
+      uncommitted_regions++;
     }
 
     // Notify mark-sweep that this is no longer an archive range.
     G1MarkSweep::set_range_archive(ranges[i], false);
   }
 
-  prepend_to_freelist(&local_free_list);
+  if (uncommitted_regions != 0) {
+    ergo_verbose1(ErgoHeapSizing,
+                  "attempt heap shrinking",
+                  ergo_format_reason("uncommitted archive regions")
+                  ergo_format_byte("total size"),
+                  HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
+  }
   decrease_used(size_used);
 }
 
-
 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                                         uint* gc_count_before_ret,
                                                         uint* gclocker_retry_count_ret) {
   // The structure of this method has a lot of similarities to
   // attempt_allocation_slow(). The reason these two were not merged
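
Reviewer note, not part of the patch: the "total size" passed to ergo_verbose1 above is simply the region size in bytes times the number of regions uncommitted. A minimal sketch of that arithmetic, assuming a 64-bit HeapWordSize of 8 and a 1 MB region (the values are illustrative, not taken from the patch):

#include <cstdio>
#include <cstddef>

int main() {
  const size_t HeapWordSize = 8;                  // assumed 64-bit HotSpot word
  const size_t GrainWords   = (1024 * 1024) / 8;  // 1 MB region expressed in words
  size_t uncommitted_regions = 3;                 // e.g. three archive regions shrunk away

  // Same expression the patch logs as "total size" for heap shrinking.
  size_t total_bytes = GrainWords * HeapWordSize * uncommitted_regions;
  printf("attempt heap shrinking, total size %zu bytes\n", total_bytes);
  return 0;
}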