< prev index next >

src/hotspot/share/gc/g1/g1Allocator.cpp

Print this page
rev 56323 : imported patch 8220310.mut.0
rev 56324 : imported patch 8220310.mut.1_thomas

@@ -36,24 +36,48 @@
 
 G1Allocator::G1Allocator(G1CollectedHeap* heap) :
   _g1h(heap),
   _survivor_is_full(false),
   _old_is_full(false),
-  _mutator_alloc_region(),
+  _num_alloc_region(heap->mem_node_mgr()->num_active_nodes()), // one mutator alloc region per active memory node
+  _mutator_alloc_region(NULL),
   _survivor_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Young)),
   _old_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Old)),
   _retained_old_gc_alloc_region(NULL) {
+
+  _mutator_alloc_region = NEW_C_HEAP_ARRAY(MutatorAllocRegion, _num_alloc_region, mtGC); // raw C-heap array; no constructors run
+  for (uint i = 0; i < _num_alloc_region; i++) {
+    ::new(_mutator_alloc_region + i) MutatorAllocRegion(i); // placement-construct region i, tagged with its node index
+  }
+}
+
+G1Allocator::~G1Allocator() {
+  for (uint i = 0; i < _num_alloc_region; i++) {
+    _mutator_alloc_region[i].~MutatorAllocRegion(); // explicit destroy: elements were placement-new'ed above
+  }
+  FREE_C_HEAP_ARRAY(MutatorAllocRegion, _mutator_alloc_region); // frees storage only; destructors already run
 }
 
+#ifdef ASSERT
+bool G1Allocator::has_mutator_alloc_region() {
+  uint node_index = _g1h->mem_node_mgr()->index_of_current_thread(); // map calling thread to its memory node
+  return mutator_alloc_region(node_index)->get() != NULL;
+}
+#endif
+
 void G1Allocator::init_mutator_alloc_region() {
-  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
-  _mutator_alloc_region.init();
+  for (uint i = 0; i < _num_alloc_region; i++) { // init every per-node region, not just a single global one
+    assert(mutator_alloc_region(i)->get() == NULL, "pre-condition");
+    mutator_alloc_region(i)->init();
+  }
 }
 
 void G1Allocator::release_mutator_alloc_region() {
-  _mutator_alloc_region.release();
-  assert(_mutator_alloc_region.get() == NULL, "post-condition");
+  for (uint i = 0; i < _num_alloc_region; i++) { // release and verify every per-node region
+    mutator_alloc_region(i)->release();
+    assert(mutator_alloc_region(i)->get() == NULL, "post-condition");
+  }
 }
 
 bool G1Allocator::is_retained_old_region(HeapRegion* hr) {
   return _retained_old_gc_alloc_region == hr;
 }

@@ -144,22 +168,26 @@
 
   // Also, this value can be at most the humongous object threshold,
   // since we can't allow tlabs to grow big enough to accommodate
   // humongous objects.
 
-  HeapRegion* hr = mutator_alloc_region()->get();
+  uint node_index = _g1h->mem_node_mgr()->index_of_current_thread(); // TLAB sizing is per-node: use this thread's node's region
+  HeapRegion* hr = mutator_alloc_region(node_index)->get();
   size_t max_tlab = _g1h->max_tlab_size() * wordSize;
   if (hr == NULL) {
     return max_tlab;
   } else {
     return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
   }
 }
 
 size_t G1Allocator::used_in_alloc_regions() {
-  assert(Heap_lock->owner() != NULL, "Should be owned on this thread's behalf.");
-  return mutator_alloc_region()->used_in_alloc_regions();
+  size_t used = 0; // NOTE(review): the Heap_lock ownership assert was dropped by this patch -- confirm callers still hold it
+  for (uint i = 0; i < _num_alloc_region; i++) { // sum usage across all per-node mutator regions
+    used += mutator_alloc_region(i)->used_in_alloc_regions();
+  }
+  return used;
 }
 
 
 HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
                                               size_t word_size) {
< prev index next >