
src/hotspot/share/gc/g1/g1Allocator.cpp

rev 56448 : imported patch 8220310.mut.0
rev 56449 : imported patch 8220310.mut.1
rev 56450 : imported patch 8220310.mut.2
rev 56451 : imported patch 8220310.mut.3
rev 56452 : [mq]: 8220310.mut.4

*** 26,59 ****
  #include "gc/g1/g1Allocator.inline.hpp"
  #include "gc/g1/g1AllocRegion.inline.hpp"
  #include "gc/g1/g1EvacStats.inline.hpp"
  #include "gc/g1/g1EvacuationInfo.hpp"
  #include "gc/g1/g1CollectedHeap.inline.hpp"
  #include "gc/g1/g1Policy.hpp"
  #include "gc/g1/heapRegion.inline.hpp"
  #include "gc/g1/heapRegionSet.inline.hpp"
  #include "gc/g1/heapRegionType.hpp"
  #include "utilities/align.hpp"
  
  G1Allocator::G1Allocator(G1CollectedHeap* heap) :
    _g1h(heap),
    _survivor_is_full(false),
    _old_is_full(false),
!   _mutator_alloc_region(),
    _survivor_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Young)),
    _old_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Old)),
    _retained_old_gc_alloc_region(NULL) {
  }
  
! void G1Allocator::init_mutator_alloc_region() {
!   assert(_mutator_alloc_region.get() == NULL, "pre-condition");
!   _mutator_alloc_region.init();
  }
  
! void G1Allocator::release_mutator_alloc_region() {
!   _mutator_alloc_region.release();
!   assert(_mutator_alloc_region.get() == NULL, "post-condition");
  }
  
  bool G1Allocator::is_retained_old_region(HeapRegion* hr) {
    return _retained_old_gc_alloc_region == hr;
  }
--- 26,85 ----
  #include "gc/g1/g1Allocator.inline.hpp"
  #include "gc/g1/g1AllocRegion.inline.hpp"
  #include "gc/g1/g1EvacStats.inline.hpp"
  #include "gc/g1/g1EvacuationInfo.hpp"
  #include "gc/g1/g1CollectedHeap.inline.hpp"
+ #include "gc/g1/g1NUMA.hpp"
  #include "gc/g1/g1Policy.hpp"
  #include "gc/g1/heapRegion.inline.hpp"
  #include "gc/g1/heapRegionSet.inline.hpp"
  #include "gc/g1/heapRegionType.hpp"
  #include "utilities/align.hpp"
  
  G1Allocator::G1Allocator(G1CollectedHeap* heap) :
    _g1h(heap),
+   _numa(heap->numa()),
    _survivor_is_full(false),
    _old_is_full(false),
!   _num_alloc_regions(_numa->num_active_nodes()),
!   _mutator_alloc_regions(NULL),
    _survivor_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Young)),
    _old_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Old)),
    _retained_old_gc_alloc_region(NULL) {
+ 
+   _mutator_alloc_regions = NEW_C_HEAP_ARRAY(MutatorAllocRegion, _num_alloc_regions, mtGC);
+   for (uint i = 0; i < _num_alloc_regions; i++) {
+     ::new(_mutator_alloc_regions + i) MutatorAllocRegion(i);
+   }
+ }
+ 
+ G1Allocator::~G1Allocator() {
+   for (uint i = 0; i < _num_alloc_regions; i++) {
+     _mutator_alloc_regions[i].~MutatorAllocRegion();
+   }
+   FREE_C_HEAP_ARRAY(MutatorAllocRegion, _mutator_alloc_regions);
  }
  
! #ifdef ASSERT
! bool G1Allocator::has_mutator_alloc_region() {
!   uint node_index = current_node_index();
!   return mutator_alloc_region(node_index)->get() != NULL;
  }
+ #endif
  
! void G1Allocator::init_mutator_alloc_regions() {
!   for (uint i = 0; i < _num_alloc_regions; i++) {
!     assert(mutator_alloc_region(i)->get() == NULL, "pre-condition");
!     mutator_alloc_region(i)->init();
!   }
! }
! 
! void G1Allocator::release_mutator_alloc_regions() {
!   for (uint i = 0; i < _num_alloc_regions; i++) {
!     mutator_alloc_region(i)->release();
!     assert(mutator_alloc_region(i)->get() == NULL, "post-condition");
!   }
  }
  
  bool G1Allocator::is_retained_old_region(HeapRegion* hr) {
    return _retained_old_gc_alloc_region == hr;
  }
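
The constructor and destructor in the hunk above manage one MutatorAllocRegion per active NUMA node by building the objects inside a raw C-heap array: NEW_C_HEAP_ARRAY reserves the storage, placement new constructs each element with its node index, and ~G1Allocator() runs each element's destructor before FREE_C_HEAP_ARRAY releases the storage. Below is a minimal standalone sketch of that same construct/destroy pattern in plain C++; it uses std::malloc/std::free in place of HotSpot's C-heap macros, and the PerNodeRegion type and function names are purely illustrative, not part of the patch.

#include <cstdlib>
#include <new>

// Illustrative stand-in for MutatorAllocRegion; not part of the patch.
struct PerNodeRegion {
  unsigned _node_index;
  explicit PerNodeRegion(unsigned node_index) : _node_index(node_index) {}
  ~PerNodeRegion() {}
};

// Reserve raw storage and construct one element per node in place,
// mirroring the NEW_C_HEAP_ARRAY + placement-new loop in the constructor above.
PerNodeRegion* create_regions(unsigned num_nodes) {
  void* raw = std::malloc(num_nodes * sizeof(PerNodeRegion));
  if (raw == NULL) {
    return NULL;  // allocation failure; caller must handle
  }
  PerNodeRegion* regions = static_cast<PerNodeRegion*>(raw);
  for (unsigned i = 0; i < num_nodes; i++) {
    ::new (regions + i) PerNodeRegion(i);
  }
  return regions;
}

// Run each destructor explicitly before freeing the raw storage,
// mirroring ~G1Allocator() above.
void destroy_regions(PerNodeRegion* regions, unsigned num_nodes) {
  for (unsigned i = 0; i < num_nodes; i++) {
    regions[i].~PerNodeRegion();
  }
  std::free(regions);
}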
*** 144,165 ****
  
    // Also, this value can be at most the humongous object threshold,
    // since we can't allow tlabs to grow big enough to accommodate
    // humongous objects.
  
!   HeapRegion* hr = mutator_alloc_region()->get();
    size_t max_tlab = _g1h->max_tlab_size() * wordSize;
    if (hr == NULL) {
      return max_tlab;
    } else {
      return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
    }
  }
  
  size_t G1Allocator::used_in_alloc_regions() {
    assert(Heap_lock->owner() != NULL, "Should be owned on this thread's behalf.");
!   return mutator_alloc_region()->used_in_alloc_regions();
  }
  
  
  HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
                                                size_t word_size) {
--- 170,196 ----
  
    // Also, this value can be at most the humongous object threshold,
    // since we can't allow tlabs to grow big enough to accommodate
    // humongous objects.
  
!   uint node_index = current_node_index();
!   HeapRegion* hr = mutator_alloc_region(node_index)->get();
    size_t max_tlab = _g1h->max_tlab_size() * wordSize;
    if (hr == NULL) {
      return max_tlab;
    } else {
      return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
    }
  }
  
  size_t G1Allocator::used_in_alloc_regions() {
    assert(Heap_lock->owner() != NULL, "Should be owned on this thread's behalf.");
!   size_t used = 0;
!   for (uint i = 0; i < _num_alloc_regions; i++) {
!     used += mutator_alloc_region(i)->used_in_alloc_regions();
!   }
!   return used;
  }
  
  
  HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
                                                size_t word_size) {
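
In this second hunk, unsafe_max_tlab_alloc() now queries the mutator region for the calling thread's NUMA node while keeping the same clamp between MinTLABSize and the heap's maximum TLAB size, and used_in_alloc_regions() sums over all per-node regions instead of reading a single region. A small standalone restatement of that clamp follows (plain C++; the function and parameter names are illustrative, and std::min/std::max stand in for HotSpot's MIN2/MAX2).

#include <algorithm>
#include <cstddef>

// Mirrors the sizing logic in unsafe_max_tlab_alloc(): with no current region
// the maximum TLAB size applies; otherwise the region's free space is bounded
// below by the minimum TLAB size and above by the maximum TLAB size.
size_t unsafe_max_tlab_bytes(bool has_region,
                             size_t region_free_bytes,
                             size_t min_tlab_bytes,
                             size_t max_tlab_bytes) {
  if (!has_region) {
    return max_tlab_bytes;
  }
  return std::min(std::max(region_free_bytes, min_tlab_bytes), max_tlab_bytes);
}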