90 start_array()->initialize(limit_reserved);
91
92 _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
93 (HeapWord*)virtual_space()->high_boundary());
94
95 //
96 // Card table stuff
97 //
98
99 MemRegion cmr((HeapWord*)virtual_space()->low(),
100 (HeapWord*)virtual_space()->high());
101 if (ZapUnusedHeapArea) {
102 // Mangle newly committed space immediately rather than
103 // waiting for the initialization of the space even though
104 // mangling is related to spaces. Doing it here eliminates
105 // the need to carry along information that a complete mangling
106 // (bottom to end) needs to be done.
107 SpaceMangler::mangle_region(cmr);
108 }
109
110 Universe::heap()->barrier_set()->resize_covered_region(cmr);
111
112 CardTableModRefBS* _ct =
113 barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
114
115 // Verify that the start and end of this generation are each the start of a card.
116 // If this were not true, a single card could span more than one generation,
117 // which would cause problems when we commit/uncommit memory, and when we
118 // clear and dirty cards.
119 guarantee(_ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
120 if (_reserved.end() != Universe::heap()->reserved_region().end()) {
121 // Don't check at the very end of the heap as we'll assert that we're probing off
122 // the end if we try.
123 guarantee(_ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
124 }
125
126 //
127 // ObjectSpace stuff
128 //
129
130 _object_space = new MutableSpace(virtual_space()->alignment());
131
132 if (_object_space == NULL)
133 vm_exit_during_initialization("Could not allocate an old gen space");
134
135 object_space()->initialize(cmr,
136 SpaceDecorator::Clear,
137 SpaceDecorator::Mangle);
138
139 _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);
140
141 if (_object_mark_sweep == NULL)
142 vm_exit_during_initialization("Could not complete allocation of old generation");
143
144 // Update the start_array
145 start_array()->set_covered_region(cmr);
146 }
147
148 void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) {
149 // Generation Counters, generation 'level', 1 subspace
150 _gen_counters = new PSGenerationCounters(perf_data_name, level, 1, _min_gen_size,
151 _max_gen_size, virtual_space());
152 _space_counters = new SpaceCounters(perf_data_name, 0,
153 virtual_space()->reserved_size(),
154 _object_space, _gen_counters);
155 }
156
157 // Assume that the generation has been allocated if its
158 // reserved size is not 0.
159 bool PSOldGen::is_allocated() {
160 return virtual_space()->reserved_size() != 0;
161 }
162
163 void PSOldGen::precompact() {
164 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
165 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
166
167 // Reset start array first.
168 start_array()->reset();
169
170 object_mark_sweep()->precompact();
171
172 // Now compact the young gen
173 heap->young_gen()->precompact();
174 }
175
176 void PSOldGen::adjust_pointers() {
177 object_mark_sweep()->adjust_pointers();
178 }
179
180 void PSOldGen::compact() {
181 object_mark_sweep()->compact(ZapUnusedHeapArea);
182 }
183
184 size_t PSOldGen::contiguous_available() const {
185 return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
186 }
187
188 // Allocation. We report all successful allocations to the size policy.
189 // Note that the perm gen does not use this method, and should not!
190 HeapWord* PSOldGen::allocate(size_t word_size) {
191 assert_locked_or_safepoint(Heap_lock);
192 HeapWord* res = allocate_noexpand(word_size);
193
194 if (res == NULL) {
195 res = expand_and_allocate(word_size);
196 }
197
198 // Allocations in the old generation need to be reported
199 if (res != NULL) {
200 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
201 heap->size_policy()->tenured_allocation(word_size);
202 }
203
204 return res;
205 }
206
207 HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
208 expand(word_size*HeapWordSize);
209 if (GCExpandToAllocateDelayMillis > 0) {
210 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
211 }
212 return allocate_noexpand(word_size);
213 }
214
215 HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
216 expand(word_size*HeapWordSize);
217 if (GCExpandToAllocateDelayMillis > 0) {
218 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
219 }
220 return cas_allocate_noexpand(word_size);
359 " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
360 desired_free_space, used_in_bytes(), new_size, current_size,
361 gen_size_limit(), min_gen_size());
362 }
363
364 if (new_size == current_size) {
365 // No change requested
366 return;
367 }
368 if (new_size > current_size) {
369 size_t change_bytes = new_size - current_size;
370 expand(change_bytes);
371 } else {
372 size_t change_bytes = current_size - new_size;
373 // shrink doesn't grab this lock, expand does. Is that right?
374 MutexLocker x(ExpandHeap_lock);
375 shrink(change_bytes);
376 }
377
378 if (PrintAdaptiveSizePolicy) {
379 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
380 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
381 gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
382 "collection: %d "
383 "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
384 heap->total_collections(),
385 size_before, virtual_space()->committed_size());
386 }
387 }
388
389 // NOTE! We need to be careful about resizing. During a GC, multiple
390 // allocators may be active during heap expansion. If we allow the
391 // heap resizing to become visible before we have correctly resized
392 // all heap related data structures, we may cause program failures.
393 void PSOldGen::post_resize() {
394 // First construct a memregion representing the new size
395 MemRegion new_memregion((HeapWord*)virtual_space()->low(),
396 (HeapWord*)virtual_space()->high());
397 size_t new_word_size = new_memregion.word_size();
398
399 start_array()->set_covered_region(new_memregion);
400 Universe::heap()->barrier_set()->resize_covered_region(new_memregion);
401
402 // ALWAYS do this last!!
403 object_space()->initialize(new_memregion,
404 SpaceDecorator::DontClear,
405 SpaceDecorator::DontMangle);
406
407 assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
408 "Sanity");
409 }
410
411 size_t PSOldGen::gen_size_limit() {
412 return _max_gen_size;
413 }
414
415 void PSOldGen::reset_after_change() {
416 ShouldNotReachHere();
417 return;
418 }
419
420 size_t PSOldGen::available_for_expansion() {
90 start_array()->initialize(limit_reserved);
91
92 _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
93 (HeapWord*)virtual_space()->high_boundary());
94
95 //
96 // Card table stuff
97 //
98
99 MemRegion cmr((HeapWord*)virtual_space()->low(),
100 (HeapWord*)virtual_space()->high());
101 if (ZapUnusedHeapArea) {
102 // Mangle newly committed space immediately rather than
103 // waiting for the initialization of the space even though
104 // mangling is related to spaces. Doing it here eliminates
105 // the need to carry along information that a complete mangling
106 // (bottom to end) needs to be done.
107 SpaceMangler::mangle_region(cmr);
108 }
109
110 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
111 BarrierSet* bs = heap->barrier_set();
112
113 bs->resize_covered_region(cmr);
114
115 CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
116
117 // Verify that the start and end of this generation are each the start of a card.
118 // If this were not true, a single card could span more than one generation,
119 // which would cause problems when we commit/uncommit memory, and when we
120 // clear and dirty cards.
121 guarantee(ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
122 if (_reserved.end() != heap->reserved_region().end()) {
123 // Don't check at the very end of the heap as we'll assert that we're probing off
124 // the end if we try.
125 guarantee(ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
126 }
127
128 //
129 // ObjectSpace stuff
130 //
131
132 _object_space = new MutableSpace(virtual_space()->alignment());
133
134 if (_object_space == NULL)
135 vm_exit_during_initialization("Could not allocate an old gen space");
136
137 object_space()->initialize(cmr,
138 SpaceDecorator::Clear,
139 SpaceDecorator::Mangle);
140
141 _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);
142
143 if (_object_mark_sweep == NULL)
144 vm_exit_during_initialization("Could not complete allocation of old generation");
145
146 // Update the start_array
147 start_array()->set_covered_region(cmr);
148 }
149
150 void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) {
151 // Generation Counters, generation 'level', 1 subspace
152 _gen_counters = new PSGenerationCounters(perf_data_name, level, 1, _min_gen_size,
153 _max_gen_size, virtual_space());
154 _space_counters = new SpaceCounters(perf_data_name, 0,
155 virtual_space()->reserved_size(),
156 _object_space, _gen_counters);
157 }
158
159 // Assume that the generation has been allocated if its
160 // reserved size is not 0.
161 bool PSOldGen::is_allocated() {
162 return virtual_space()->reserved_size() != 0;
163 }
164
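// Phase of a full (mark-sweep) collection: clear the object start array,
// let the mark-sweep decorator compute destinations for the live objects in
// this generation, then precompact the young gen as well.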
165 void PSOldGen::precompact() {
166 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
167
168 // Reset start array first.
169 start_array()->reset();
170
171 object_mark_sweep()->precompact();
172
173 // Now compact the young gen
174 heap->young_gen()->precompact();
175 }
176
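// Update references held by objects in this generation so they point to the
// new (post-compaction) locations of their targets.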
177 void PSOldGen::adjust_pointers() {
178 object_mark_sweep()->adjust_pointers();
179 }
180
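// Move live objects to their previously computed destinations, mangling the
// freed space if ZapUnusedHeapArea is enabled.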
181 void PSOldGen::compact() {
182 object_mark_sweep()->compact(ZapUnusedHeapArea);
183 }
184
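// Bytes that could still be made available without a collection: the free
// space in the object space plus the reserved-but-uncommitted part of the
// backing virtual space.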
185 size_t PSOldGen::contiguous_available() const {
186 return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
187 }
188
189 // Allocation. We report all successful allocations to the size policy.
190 // Note that the perm gen does not use this method, and should not!
191 HeapWord* PSOldGen::allocate(size_t word_size) {
192 assert_locked_or_safepoint(Heap_lock);
193 HeapWord* res = allocate_noexpand(word_size);
194
195 if (res == NULL) {
196 res = expand_and_allocate(word_size);
197 }
198
199 // Allocations in the old generation need to be reported
200 if (res != NULL) {
201 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
202 heap->size_policy()->tenured_allocation(word_size);
203 }
204
205 return res;
206 }
207
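// Allocation slow path: try to expand the committed space by the requested
// number of bytes, then retry a non-expanding allocation. The optional sleep
// (GCExpandToAllocateDelayMillis) looks like a diagnostic knob for simulating
// a slow expansion.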
208 HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
209 expand(word_size*HeapWordSize);
210 if (GCExpandToAllocateDelayMillis > 0) {
211 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
212 }
213 return allocate_noexpand(word_size);
214 }
215
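// Same slow path, but the retry uses a CAS-based allocation so it can be
// called from paths that do not hold the Heap_lock.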
216 HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
217 expand(word_size*HeapWordSize);
218 if (GCExpandToAllocateDelayMillis > 0) {
219 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
220 }
221 return cas_allocate_noexpand(word_size);
360 " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
361 desired_free_space, used_in_bytes(), new_size, current_size,
362 gen_size_limit(), min_gen_size());
363 }
364
365 if (new_size == current_size) {
366 // No change requested
367 return;
368 }
369 if (new_size > current_size) {
370 size_t change_bytes = new_size - current_size;
371 expand(change_bytes);
372 } else {
373 size_t change_bytes = current_size - new_size;
374 // shrink doesn't grab this lock, expand does. Is that right?
375 MutexLocker x(ExpandHeap_lock);
376 shrink(change_bytes);
377 }
378
379 if (PrintAdaptiveSizePolicy) {
380 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
381 gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
382 "collection: %d "
383 "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
384 heap->total_collections(),
385 size_before, virtual_space()->committed_size());
386 }
387 }
388
389 // NOTE! We need to be careful about resizing. During a GC, multiple
390 // allocators may be active during heap expansion. If we allow the
391 // heap resizing to become visible before we have correctly resized
392 // all heap related data structures, we may cause program failures.
393 void PSOldGen::post_resize() {
394 // First construct a memregion representing the new size
395 MemRegion new_memregion((HeapWord*)virtual_space()->low(),
396 (HeapWord*)virtual_space()->high());
397 size_t new_word_size = new_memregion.word_size();
398
399 start_array()->set_covered_region(new_memregion);
400 ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(new_memregion);
401
402 // ALWAYS do this last!!
403 object_space()->initialize(new_memregion,
404 SpaceDecorator::DontClear,
405 SpaceDecorator::DontMangle);
406
407 assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
408 "Sanity");
409 }
410
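// Hard upper bound on this generation's size (the maximum reserved size).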
411 size_t PSOldGen::gen_size_limit() {
412 return _max_gen_size;
413 }
414
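// Not supported for the base PSOldGen; presumably a subclass used with
// adaptive GC boundaries overrides this.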
415 void PSOldGen::reset_after_change() {
416 ShouldNotReachHere();
417 return;
418 }
419
420 size_t PSOldGen::available_for_expansion() {