< prev index next >

src/share/vm/gc/g1/g1CollectedHeap.cpp

Print this page

        

@@ -612,14 +612,14 @@
 
   ShouldNotReachHere();
   return NULL;
 }
 
-void G1CollectedHeap::begin_archive_alloc_range() {
+void G1CollectedHeap::begin_archive_alloc_range(bool open) {
   assert_at_safepoint(true /* should_be_vm_thread */);
   if (_archive_allocator == NULL) {
-    _archive_allocator = G1ArchiveAllocator::create_allocator(this);
+    _archive_allocator = G1ArchiveAllocator::create_allocator(this, open);
   }
 }
 
 bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {
   // Allocations in archive regions cannot be of a size that would be considered

@@ -659,11 +659,13 @@
     }
   }
   return true;
 }
 
-bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
+bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
+                                            size_t count,
+                                            bool open) {
   assert(!is_init_completed(), "Expect to be called at JVM init time");
   assert(ranges != NULL, "MemRegion array NULL");
   assert(count != 0, "No MemRegions provided");
   MutexLockerEx x(Heap_lock);
 

@@ -678,12 +680,12 @@
   // Enable archive object checking used by G1MarkSweep. We have to let it know
   // about each archive range, so that objects in those ranges aren't marked.
   G1ArchiveAllocator::enable_archive_object_check();
 
   // For each specified MemRegion range, allocate the corresponding G1
-  // regions and mark them as archive regions. We expect the ranges in
-  // ascending starting address order, without overlap.
+  // regions and mark them as archive regions. We expect the ranges
+  // in ascending starting address order, without overlap.
   for (size_t i = 0; i < count; i++) {
     MemRegion curr_range = ranges[i];
     HeapWord* start_address = curr_range.start();
     size_t word_size = curr_range.word_size();
     HeapWord* last_address = curr_range.last();

@@ -724,34 +726,44 @@
       log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
                                 HeapRegion::GrainWords * HeapWordSize * commits);
 
     }
 
-    // Mark each G1 region touched by the range as archive, add it to the old set,
-    // and set the allocation context and top.
+    // Mark each G1 region touched by the range as archive, add it to
+    // the old set, and set the allocation context and top.
     HeapRegion* curr_region = _hrm.addr_to_region(start_address);
     HeapRegion* last_region = _hrm.addr_to_region(last_address);
     prev_last_region = last_region;
 
     while (curr_region != NULL) {
       assert(curr_region->is_empty() && !curr_region->is_pinned(),
              "Region already in use (index %u)", curr_region->hrm_index());
       curr_region->set_allocation_context(AllocationContext::system());
-      curr_region->set_archive();
+      if (open) {
+        curr_region->set_open_archive();
+      } else {
+        curr_region->set_closed_archive();
+      }
       _hr_printer.alloc(curr_region);
       _old_set.add(curr_region);
       if (curr_region != last_region) {
-        curr_region->set_top(curr_region->end());
+        HeapWord* top = curr_region->end();
+        curr_region->set_top(top);
+        curr_region->set_first_dead(top);
+        curr_region->set_end_of_live(top);
         curr_region = _hrm.next_region_in_heap(curr_region);
       } else {
-        curr_region->set_top(last_address + 1);
+        HeapWord* top = last_address + 1;
+        curr_region->set_top(top);
+        curr_region->set_first_dead(top);
+        curr_region->set_end_of_live(top);
         curr_region = NULL;
       }
     }
 
-    // Notify mark-sweep of the archive range.
-    G1ArchiveAllocator::set_range_archive(curr_range, true);
+    // Notify mark-sweep of the archive range.
+    G1ArchiveAllocator::set_range_archive(curr_range, open);
   }
   return true;
 }
 
 void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {

@@ -5211,16 +5223,12 @@
 
       if (r->is_humongous()) {
         // We ignore humongous regions. We left the humongous set unchanged.
       } else {
         assert(r->is_young() || r->is_free() || r->is_old(), "invariant");
-        // We now consider all regions old, so register as such. Leave
-        // archive regions set that way, however, while still adding
-        // them to the old set.
-        if (!r->is_archive()) {
-          r->set_old();
-        }
+        // We now move all (non-humongous, non-old) regions to old gen, and register them as such.
+        r->move_to_old();
         _old_set->add(r);
       }
       _total_used += r->used();
     }
 
< prev index next >