72 vm_shutdown_during_initialization(
73 "Could not reserve enough space for object heap");
74 return JNI_ENOMEM;
75 }
76
77 initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
78
79 CardTableExtension* const barrier_set = new CardTableExtension(reserved_region());
80 barrier_set->initialize();
81 set_barrier_set(barrier_set);
82
83 // Make up the generations
84 // Calculate the maximum size that a generation can grow. This
85 // includes growth into the other generation. Note that the
86 // parameter _max_gen_size is kept as the maximum
87 // size of the generation as the boundaries currently stand.
88 // _max_gen_size is still used as that value.
89 double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
90 double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
91
92 _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());
93
94 _old_gen = _gens->old_gen();
95 _young_gen = _gens->young_gen();
96
97 const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
98 const size_t old_capacity = _old_gen->capacity_in_bytes();
99 const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
100 _size_policy =
101 new PSAdaptiveSizePolicy(eden_capacity,
102 initial_promo_size,
103 young_gen()->to_space()->capacity_in_bytes(),
104 _collector_policy->gen_alignment(),
105 max_gc_pause_sec,
106 max_gc_minor_pause_sec,
107 GCTimeRatio
108 );
109
110 assert(!UseAdaptiveGCBoundary ||
111 (old_gen()->virtual_space()->high_boundary() ==
112 young_gen()->virtual_space()->low_boundary()),
113 "Boundaries must meet");
114 // initialize the policy counters - 2 collectors, 3 generations
115 _gc_policy_counters =
116 new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
117 _psh = this;
118
119 // Set up the GCTaskManager
120 _gc_task_manager = GCTaskManager::create(ParallelGCThreads);
121
122 if (UseParallelOldGC && !PSParallelCompact::initialize()) {
123 return JNI_ENOMEM;
124 }
125
126 return JNI_OK;
127 }
128
129 void ParallelScavengeHeap::post_initialize() {
130 // Need to init the tenuring threshold
131 PSScavenge::initialize();
132 if (UseParallelOldGC) {
133 PSParallelCompact::post_initialize();
134 } else {
135 PSMarkSweep::initialize();
136 }
137 PSPromotionManager::initialize();
242 HeapWord* result = young_gen()->allocate(size);
243
244 uint loop_count = 0;
245 uint gc_count = 0;
246 uint gclocker_stalled_count = 0;
247
248 while (result == NULL) {
249 // We don't want to have multiple collections for a single filled generation.
250 // To prevent this, each thread tracks the total_collections() value, and if
251 // the count has changed, does not do a new collection.
252 //
253 // The collection count must be read only while holding the heap lock. VM
254 // operations also hold the heap lock during collections. There is a lock
255 // contention case where thread A blocks waiting on the Heap_lock, while
256 // thread B is holding it doing a collection. When thread A gets the lock,
257 // the collection count has already changed. To prevent duplicate collections,
258 // The policy MUST attempt allocations during the same period it reads the
259 // total_collections() value!
260 {
261 MutexLocker ml(Heap_lock);
262 gc_count = Universe::heap()->total_collections();
263
264 result = young_gen()->allocate(size);
265 if (result != NULL) {
266 return result;
267 }
268
269 // If certain conditions hold, try allocating from the old gen.
270 result = mem_allocate_old_gen(size);
271 if (result != NULL) {
272 return result;
273 }
274
275 if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
276 return NULL;
277 }
278
279 // Failed to allocate without a gc.
280 if (GC_locker::is_active_and_needs_gc()) {
281 // If this thread is not in a jni critical section, we stall
282 // the requestor until the critical section has cleared and
292 continue;
293 } else {
294 if (CheckJNICalls) {
295 fatal("Possible deadlock due to allocating while"
296 " in jni critical section");
297 }
298 return NULL;
299 }
300 }
301 }
302
303 if (result == NULL) {
304 // Generate a VM operation
305 VM_ParallelGCFailedAllocation op(size, gc_count);
306 VMThread::execute(&op);
307
308 // Did the VM operation execute? If so, return the result directly.
309 // This prevents us from looping until time out on requests that can
310 // not be satisfied.
311 if (op.prologue_succeeded()) {
312 assert(Universe::heap()->is_in_or_null(op.result()),
313 "result not in heap");
314
315 // If GC was locked out during VM operation then retry allocation
316 // and/or stall as necessary.
317 if (op.gc_locked()) {
318 assert(op.result() == NULL, "must be NULL if gc_locked() is true");
319 continue; // retry and/or stall as necessary
320 }
321
322 // Exit the loop if the gc time limit has been exceeded.
323 // The allocation must have failed above ("result" guarding
324 // this path is NULL) and the most recent collection has exceeded the
325 // gc overhead limit (although enough may have been collected to
326 // satisfy the allocation). Exit the loop so that an out-of-memory
327 // will be thrown (return a NULL ignoring the contents of
328 // op.result()),
329 // but clear gc_overhead_limit_exceeded so that the next collection
330 // starts with a clean slate (i.e., forgets about previous overhead
331 // excesses). Fill op.result() with a filler object so that the
332 // heap remains parsable.
333 const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
403 if (UseParallelOldGC) {
404 // The do_full_collection() parameter clear_all_soft_refs
405 // is interpreted here as maximum_compaction which will
406 // cause SoftRefs to be cleared.
407 bool maximum_compaction = clear_all_soft_refs;
408 PSParallelCompact::invoke(maximum_compaction);
409 } else {
410 PSMarkSweep::invoke(clear_all_soft_refs);
411 }
412 }
413
414 // Failed allocation policy. Must be called from the VM thread, and
415 // only at a safepoint! Note that this method has policy for allocation
416 // flow, and NOT collection policy. So we do not check for gc collection
417 // time over limit here, that is the responsibility of the heap specific
418 // collection methods. This method decides where to attempt allocations,
419 // and when to attempt collections, but no collection specific policy.
420 HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
421 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
422 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
423 assert(!Universe::heap()->is_gc_active(), "not reentrant");
424 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
425
426 // We assume that allocation in eden will fail unless we collect.
427
428 // First level allocation failure, scavenge and allocate in young gen.
429 GCCauseSetter gccs(this, GCCause::_allocation_failure);
430 const bool invoked_full_gc = PSScavenge::invoke();
431 HeapWord* result = young_gen()->allocate(size);
432
433 // Second level allocation failure.
434 // Mark sweep and allocate in young generation.
435 if (result == NULL && !invoked_full_gc) {
436 do_full_collection(false);
437 result = young_gen()->allocate(size);
438 }
439
440 death_march_check(result, size);
441
442 // Third level allocation failure.
443 // After mark sweep and young generation allocation failure,
491 CollectedHeap::resize_all_tlabs();
492 }
493
494 bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
495 // We don't need barriers for stores to objects in the
496 // young gen and, a fortiori, for initializing stores to
497 // objects therein.
498 return is_in_young(new_obj);
499 }
500
501 // This method is used by System.gc() and JVMTI.
502 void ParallelScavengeHeap::collect(GCCause::Cause cause) {
503 assert(!Heap_lock->owned_by_self(),
504 "this thread should not own the Heap_lock");
505
506 uint gc_count = 0;
507 uint full_gc_count = 0;
508 {
509 MutexLocker ml(Heap_lock);
510 // This value is guarded by the Heap_lock
511 gc_count = Universe::heap()->total_collections();
512 full_gc_count = Universe::heap()->total_full_collections();
513 }
514
515 VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
516 VMThread::execute(&op);
517 }
518
// Apply the closure to every object in the heap: the young generation
// is walked first, then the old generation.
void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}
523
524
525 HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
526 if (young_gen()->is_in_reserved(addr)) {
527 assert(young_gen()->is_in(addr),
528 "addr should be in allocated part of young gen");
529 // called from os::print_location by find or VMError
530 if (Debugging || VMError::fatal_error_in_progress()) return NULL;
531 Unimplemented();
532 } else if (old_gen()->is_in_reserved(addr)) {
|
72 vm_shutdown_during_initialization(
73 "Could not reserve enough space for object heap");
74 return JNI_ENOMEM;
75 }
76
77 initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
78
79 CardTableExtension* const barrier_set = new CardTableExtension(reserved_region());
80 barrier_set->initialize();
81 set_barrier_set(barrier_set);
82
83 // Make up the generations
84 // Calculate the maximum size that a generation can grow. This
85 // includes growth into the other generation. Note that the
86 // parameter _max_gen_size is kept as the maximum
87 // size of the generation as the boundaries currently stand.
88 // _max_gen_size is still used as that value.
89 double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
90 double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
91
92 _psh = this;
93 _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());
94
95 _old_gen = _gens->old_gen();
96 _young_gen = _gens->young_gen();
97
98 const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
99 const size_t old_capacity = _old_gen->capacity_in_bytes();
100 const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
101 _size_policy =
102 new PSAdaptiveSizePolicy(eden_capacity,
103 initial_promo_size,
104 young_gen()->to_space()->capacity_in_bytes(),
105 _collector_policy->gen_alignment(),
106 max_gc_pause_sec,
107 max_gc_minor_pause_sec,
108 GCTimeRatio
109 );
110
111 assert(!UseAdaptiveGCBoundary ||
112 (old_gen()->virtual_space()->high_boundary() ==
113 young_gen()->virtual_space()->low_boundary()),
114 "Boundaries must meet");
115 // initialize the policy counters - 2 collectors, 3 generations
116 _gc_policy_counters =
117 new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
118
119 // Set up the GCTaskManager
120 _gc_task_manager = GCTaskManager::create(ParallelGCThreads);
121
122 if (UseParallelOldGC && !PSParallelCompact::initialize()) {
123 return JNI_ENOMEM;
124 }
125
126 return JNI_OK;
127 }
128
129 void ParallelScavengeHeap::post_initialize() {
130 // Need to init the tenuring threshold
131 PSScavenge::initialize();
132 if (UseParallelOldGC) {
133 PSParallelCompact::post_initialize();
134 } else {
135 PSMarkSweep::initialize();
136 }
137 PSPromotionManager::initialize();
242 HeapWord* result = young_gen()->allocate(size);
243
244 uint loop_count = 0;
245 uint gc_count = 0;
246 uint gclocker_stalled_count = 0;
247
248 while (result == NULL) {
249 // We don't want to have multiple collections for a single filled generation.
250 // To prevent this, each thread tracks the total_collections() value, and if
251 // the count has changed, does not do a new collection.
252 //
253 // The collection count must be read only while holding the heap lock. VM
254 // operations also hold the heap lock during collections. There is a lock
255 // contention case where thread A blocks waiting on the Heap_lock, while
256 // thread B is holding it doing a collection. When thread A gets the lock,
257 // the collection count has already changed. To prevent duplicate collections,
258 // The policy MUST attempt allocations during the same period it reads the
259 // total_collections() value!
260 {
261 MutexLocker ml(Heap_lock);
262 gc_count = total_collections();
263
264 result = young_gen()->allocate(size);
265 if (result != NULL) {
266 return result;
267 }
268
269 // If certain conditions hold, try allocating from the old gen.
270 result = mem_allocate_old_gen(size);
271 if (result != NULL) {
272 return result;
273 }
274
275 if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
276 return NULL;
277 }
278
279 // Failed to allocate without a gc.
280 if (GC_locker::is_active_and_needs_gc()) {
281 // If this thread is not in a jni critical section, we stall
282 // the requestor until the critical section has cleared and
292 continue;
293 } else {
294 if (CheckJNICalls) {
295 fatal("Possible deadlock due to allocating while"
296 " in jni critical section");
297 }
298 return NULL;
299 }
300 }
301 }
302
303 if (result == NULL) {
304 // Generate a VM operation
305 VM_ParallelGCFailedAllocation op(size, gc_count);
306 VMThread::execute(&op);
307
308 // Did the VM operation execute? If so, return the result directly.
309 // This prevents us from looping until time out on requests that can
310 // not be satisfied.
311 if (op.prologue_succeeded()) {
312 assert(is_in_or_null(op.result()), "result not in heap");
313
314 // If GC was locked out during VM operation then retry allocation
315 // and/or stall as necessary.
316 if (op.gc_locked()) {
317 assert(op.result() == NULL, "must be NULL if gc_locked() is true");
318 continue; // retry and/or stall as necessary
319 }
320
321 // Exit the loop if the gc time limit has been exceeded.
322 // The allocation must have failed above ("result" guarding
323 // this path is NULL) and the most recent collection has exceeded the
324 // gc overhead limit (although enough may have been collected to
325 // satisfy the allocation). Exit the loop so that an out-of-memory
326 // will be thrown (return a NULL ignoring the contents of
327 // op.result()),
328 // but clear gc_overhead_limit_exceeded so that the next collection
329 // starts with a clean slate (i.e., forgets about previous overhead
330 // excesses). Fill op.result() with a filler object so that the
331 // heap remains parsable.
332 const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
402 if (UseParallelOldGC) {
403 // The do_full_collection() parameter clear_all_soft_refs
404 // is interpreted here as maximum_compaction which will
405 // cause SoftRefs to be cleared.
406 bool maximum_compaction = clear_all_soft_refs;
407 PSParallelCompact::invoke(maximum_compaction);
408 } else {
409 PSMarkSweep::invoke(clear_all_soft_refs);
410 }
411 }
412
413 // Failed allocation policy. Must be called from the VM thread, and
414 // only at a safepoint! Note that this method has policy for allocation
415 // flow, and NOT collection policy. So we do not check for gc collection
416 // time over limit here, that is the responsibility of the heap specific
417 // collection methods. This method decides where to attempt allocations,
418 // and when to attempt collections, but no collection specific policy.
419 HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
420 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
421 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
422 assert(!is_gc_active(), "not reentrant");
423 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
424
425 // We assume that allocation in eden will fail unless we collect.
426
427 // First level allocation failure, scavenge and allocate in young gen.
428 GCCauseSetter gccs(this, GCCause::_allocation_failure);
429 const bool invoked_full_gc = PSScavenge::invoke();
430 HeapWord* result = young_gen()->allocate(size);
431
432 // Second level allocation failure.
433 // Mark sweep and allocate in young generation.
434 if (result == NULL && !invoked_full_gc) {
435 do_full_collection(false);
436 result = young_gen()->allocate(size);
437 }
438
439 death_march_check(result, size);
440
441 // Third level allocation failure.
442 // After mark sweep and young generation allocation failure,
490 CollectedHeap::resize_all_tlabs();
491 }
492
493 bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
494 // We don't need barriers for stores to objects in the
495 // young gen and, a fortiori, for initializing stores to
496 // objects therein.
497 return is_in_young(new_obj);
498 }
499
500 // This method is used by System.gc() and JVMTI.
501 void ParallelScavengeHeap::collect(GCCause::Cause cause) {
502 assert(!Heap_lock->owned_by_self(),
503 "this thread should not own the Heap_lock");
504
505 uint gc_count = 0;
506 uint full_gc_count = 0;
507 {
508 MutexLocker ml(Heap_lock);
509 // This value is guarded by the Heap_lock
510 gc_count = total_collections();
511 full_gc_count = total_full_collections();
512 }
513
514 VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
515 VMThread::execute(&op);
516 }
517
// Apply the closure to every object in the heap: the young generation
// is walked first, then the old generation.
void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}
522
523
524 HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
525 if (young_gen()->is_in_reserved(addr)) {
526 assert(young_gen()->is_in(addr),
527 "addr should be in allocated part of young gen");
528 // called from os::print_location by find or VMError
529 if (Debugging || VMError::fatal_error_in_progress()) return NULL;
530 Unimplemented();
531 } else if (old_gen()->is_in_reserved(addr)) {
|