src/share/vm/gc/g1/g1CollectedHeap.cpp

@@ -1042,11 +1042,11 @@
         curr_region = NULL;
       }
     }
 
     // Notify mark-sweep of the archive range.
-    G1MarkSweep::mark_range_archive(curr_range);
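+    // The new second argument marks the range as 'archive' (true); free_archive_regions
+    // below passes false to clear that state when the range is released.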
+    G1MarkSweep::mark_range_archive(curr_range, true);
   }
   return true;
 }
 
 void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {

@@ -1107,10 +1107,76 @@
       increase_used(fill_size * HeapWordSize);
     }
   }
 }
 
+void G1CollectedHeap::free_archive_regions(MemRegion* ranges, size_t count) {
+  assert(ranges != NULL, "MemRegion array NULL");
+  assert(count != 0, "No MemRegions provided");
+  MemRegion reserved = _hrm.reserved();
+  HeapWord* prev_last_addr = NULL;
+  HeapRegion* prev_last_region = NULL;
+  FreeRegionList local_free_list("Local List for Freeing Archive Regions");
+  size_t size_used = 0;
+
+  // For each MemRegion, free the G1 regions that constitute it, and
+  // notify mark-sweep that the range is no longer to be considered 'archive'.
+  MutexLockerEx x(Heap_lock);
+  for (size_t i = 0; i < count; i++) {
+    HeapWord* start_address = ranges[i].start();
+    HeapWord* last_address = ranges[i].last();
+
+    assert(reserved.contains(start_address) && reserved.contains(last_address),
+           err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
+                   p2i(start_address), p2i(last_address)));
+    assert(start_address > prev_last_addr,
+           err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
+                   p2i(start_address), p2i(prev_last_addr)));
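+    // Accumulate the bytes covered by the ranges so the heap's used size
+    // can be decreased once the regions have been freed.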
+    size_used += ranges[i].word_size() * HeapWordSize;
+    prev_last_addr = last_address;
+
+    HeapRegion* start_region = _hrm.addr_to_region(start_address);
+    HeapRegion* last_region = _hrm.addr_to_region(last_address);
+
+    // Check for ranges that start in the same G1 region in which the previous
+    // range ended, and adjust the start address so we don't try to free
+    // the same region again. If the current range is entirely within that
+    // region, skip it.
+    if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
+      start_address = start_region->end();
+      if (start_address > last_address) {
+        continue;
+      }
+      start_region = _hrm.addr_to_region(start_address);
+    }
+    prev_last_region = last_region;
+
+    // After verifying that each region was marked as an archive region by
+    // alloc_archive_regions, free it.
+    HeapRegion* curr_region = start_region;
+    while (curr_region != NULL) {
+      guarantee(curr_region->is_archive(),
+                err_msg("Expected archive region at index %u", curr_region->hrm_index()));
+
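+      // Remove the region from the old set and free it onto the local list;
+      // the list is prepended to the heap's free list after the loop.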
+      _old_set.remove(curr_region);
+      free_region(curr_region, &local_free_list, false /* par */, true /* locked */);
+      if (curr_region != last_region) {
+        curr_region = _hrm.next_region_in_heap(curr_region);
+      } else {
+        curr_region = NULL;
+      }
+    }
+
+    // Notify mark-sweep that this is no longer an archive range.
+    G1MarkSweep::mark_range_archive(ranges[i], false);
+  }
+
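+  // Give the locally freed regions back to the heap's free list and reduce
+  // the used size by the bytes the archive ranges occupied.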
+  prepend_to_freelist(&local_free_list);
+  decrease_used(size_used);
+}
+
 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                                         uint* gc_count_before_ret,
                                                         uint* gclocker_retry_count_ret) {
   // The structure of this method has a lot of similarities to
   // attempt_allocation_slow(). The reason these two were not merged