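  // UIntFlagSetting is an RAII helper: it overrides MarkSweepAlwaysCompactCount
  // for the enclosing scope and restores the previous value on exit. Forcing
  // the count to 1 makes this collection compact the heap fully.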
  uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

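  // If a JNI critical section is active, the collection cannot proceed;
  // check_active_before_gc() records that a GC is needed and we give up,
  // letting the GC be retried once the critical sections have been exited.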
  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  GCCause::Cause gc_cause = heap->gc_cause();

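  // Start the GC timer and tracer for this full collection; the matching
  // register_gc_end()/report_gc_end() calls are at the end of this method.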
  _gc_timer->register_gc_start(os::elapsed_counter());
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

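  // ... (the collection proper -- the marking, adjustment and compaction
  // phases, plus policy-driven resizing -- is elided here) ...

  // Post-GC sanity checks, enabled by diagnostic flags: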
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
  }

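  // NOT_PRODUCT expands its argument only in non-product (debug) builds:
  // assert that reference processing left no discovered references behind.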
  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

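  // Report the post-GC heap state (e.g. -XX:+PrintHeapAtGC), emit tracing
  // events, and, depending on flags such as HeapDumpAfterFullGC, produce a
  // heap dump or class histogram via post_full_gc_dump().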
  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(_gc_tracer);

  heap->post_full_gc_dump(_gc_timer);

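  // TRACESPINNING is a develop-time define; when enabled, dump statistics
  // on how often parallel task termination spun or yielded.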
#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

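  // Close out the timer and tracer started at the top of this method.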
  _gc_timer->register_gc_end(os::elapsed_counter());

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return true;
}

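// Try to let the old generation absorb eden's live data in place by moving
// the boundary between the two generations, instead of copying the data.
// (Summary inferred from the checks below: this requires the adaptive GC
// boundary and fully committed generations.)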
bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

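  // Moving the generation boundary is only possible when both adaptive
  // sizing and the adaptive GC boundary are enabled.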
  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }