  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();

  PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
                                      maximum_heap_compaction);
}
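
// A minimal sketch (hypothetical, for illustration only) of how a full
// collection request typically reaches invoke() above: the VM thread runs a
// VM operation at a safepoint and, with UseParallelOldGC enabled, the request
// ends up at the parallel old-generation collector, which applies
// soft-reference policy and then calls invoke_no_policy() below. The exact
// call chain shown here is an assumption, not a quote from the source:
//
//   void VM_ParallelGCSystemGC::doit() {
//     ...
//     PSParallelCompact::invoke(false /* maximum_heap_compaction */);
//   }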
1996
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  // If a JNI critical section is active, skip this collection; it will be
  // retried once the last critical section is exited.
  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = gc_heap();

  _gc_timer.register_gc_start(os::elapsed_counter());
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  TimeStamp marking_start;
  TimeStamp compaction_start;
  TimeStamp collection_exit;

  GCCause::Cause gc_cause = heap->gc_cause();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs (see the sketch of
  // ClearedAllSoftRefs after this method).
  ClearedAllSoftRefs casr(maximum_heap_compaction,
                          heap->collector_policy());

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  // ... [~200 lines elided here: the marking, summary, and compaction
  // phases that update marking_start and compaction_start] ...

  collection_exit.update();

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);

  if (PrintGCTaskTimeStamps) {
    gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " "
                           INT64_FORMAT,
                           marking_start.ticks(), compaction_start.ticks(),
                           collection_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

  heap->post_full_gc_dump(&_gc_timer);

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  _gc_timer.register_gc_end(os::elapsed_counter());

  _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return true;
}
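
// The `casr` local in invoke_no_policy() relies on an RAII guard. A minimal
// sketch of the idea, assuming the guard's only job is to record on the
// collector policy, at scope exit, whether this collection cleared all soft
// references (the real class is declared with the CollectorPolicy code, so
// details here may differ):
//
//   class ClearedAllSoftRefs : public StackObj {
//     bool             _clear_all_soft_refs;
//     CollectorPolicy* _collector_policy;
//    public:
//     ClearedAllSoftRefs(bool clear_all_soft_refs, CollectorPolicy* policy) :
//       _clear_all_soft_refs(clear_all_soft_refs), _collector_policy(policy) {}
//     // On destruction, tell the policy that soft refs were cleared so the
//     // next collection can take that into account.
//     ~ClearedAllSoftRefs() {
//       if (_clear_all_soft_refs) {
//         _collector_policy->cleared_all_soft_refs();
//       }
//     }
//   };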
2254
bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                                   PSYoungGen* young_gen,
                                                   PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  // Absorbing eden into the old gen requires a movable boundary between the
  // generations, which only exists when both of these flags are enabled.
  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.