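// Try to reuse the old GC alloc region retained from the previous evacuation
// pause. *retained_old is always cleared; if the retained region is still
// usable it is installed as the current old GC alloc region, otherwise it is
// simply dropped (the discard conditions are listed below).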
void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                            OldGCAllocRegion* old,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
  // b) it's already full (no point in using it),
  // c) it's empty (this means that it was emptied during
  //    a cleanup and it should be on the free list now), or
  // d) it's humongous (this means that it was emptied
  //    during a cleanup and was added to the free list, but
  //    has been subsequently used to allocate a humongous
  //    object that may be less than the region size).
  if (retained_region != NULL &&
      !retained_region->in_collection_set() &&
      !(retained_region->top() == retained_region->end()) &&
      !retained_region->is_empty() &&
      !retained_region->is_humongous()) {
    retained_region->record_timestamp();
    // The retained region was added to the old region set when it was
    // retired. We have to remove it now, since we don't allow regions
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.
    _g1h->_old_set.remove(retained_region);
    bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
    retained_region->note_start_of_copying(during_im);
    old->set(retained_region);
    _g1h->_hr_printer.reuse(retained_region);
    evacuation_info.set_alloc_regions_used_before(retained_region->used());
  }
}

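// Set up the per-pause survivor and old GC alloc regions at the start of an
// evacuation pause (must run at a safepoint), reusing the retained old
// region when possible.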
void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  _survivor_gc_alloc_region.init();
  _old_gc_alloc_region.init();
  reuse_retained_old_region(evacuation_info,
                            &_old_gc_alloc_region,
                            &_retained_old_gc_alloc_region);
}

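// Retire the per-pause GC alloc regions at the end of an evacuation pause:
// record how many alloc regions were used, release the survivor region, and
// retain the old region (if any) for the next pause.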
void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
  AllocationContext_t context = AllocationContext::current();
  evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
                                         old_gc_alloc_region(context)->count());
  survivor_gc_alloc_region(context)->release();
  // If we have an old GC alloc region to release, we'll save it in
  // _retained_old_gc_alloc_region. If we don't, _retained_old_gc_alloc_region
  // will become NULL. This is what we want either way, so there is no reason
  // to check explicitly for either condition.
  _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
  if (_retained_old_gc_alloc_region != NULL) {
    _retained_old_gc_alloc_region->record_retained_region();
  }

  if (ResizePLAB) {
    _g1h->_survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
    _g1h->_old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
  }
}

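// Forget the retained old region without going through the normal release
// path. The per-pause survivor and old alloc regions must already be
// inactive, as asserted below.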
void G1DefaultAllocator::abandon_gc_alloc_regions() {
  assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  _retained_old_gc_alloc_region = NULL;
}

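// A newly constructed buffer has no backing memory yet, so it is marked as
// retired from the start.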
G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
  ParGCAllocBuffer(gclab_word_size), _retired(true) { }

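// Slow-path allocation, taken when the current PLAB cannot satisfy the
// request. An object is only worth dedicating a fresh PLAB to if it is small
// relative to the desired PLAB size, i.e. smaller than
// ParallelGCBufferWastePct percent of it, as checked below.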
HeapWord* G1ParGCAllocator::allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
  HeapWord* obj = NULL;
  size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
  if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {