
src/share/vm/gc/parallel/psParallelCompact.cpp

rev 13233 : 8179268: Factor out AdaptiveSizePolicy from top-level interfaces CollectorPolicy and CollectedHeap
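The review shows the touched call sites twice: first as they were before the change, then as they are after it, where should_clear_all_soft_refs() and the ClearedAllSoftRefs constructor are reached through as_generation_policy() instead of the top-level CollectorPolicy. As a reading aid, here is a minimal sketch of the downcast pattern the new call sites presuppose; the class shapes, the constructor, and the assert are assumptions inferred from the calls below, not code from this changeset:

#include <cassert>   // stand-in for HotSpot's assert(expr, msg) macro

class GenCollectorPolicy;

class CollectorPolicy {
 public:
  virtual bool is_generation_policy() const { return false; }
  GenCollectorPolicy* as_generation_policy();   // checked downcast
};

class GenCollectorPolicy : public CollectorPolicy {
 public:
  GenCollectorPolicy() : _should_clear_all_soft_refs(false) {}
  virtual bool is_generation_policy() const { return true; }
  // After the factoring-out, the soft-ref clearing decision is queried on
  // the generational policy rather than on the top-level CollectorPolicy.
  bool should_clear_all_soft_refs() const { return _should_clear_all_soft_refs; }
 private:
  bool _should_clear_all_soft_refs;
};

GenCollectorPolicy* CollectorPolicy::as_generation_policy() {
  // ParallelScavengeHeap always configures a generational policy, so the
  // downcast performed by the new call sites below is safe.
  assert(is_generation_policy() && "must be a generational policy");
  return (GenCollectorPolicy*)this;
}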

old/src/share/vm/gc/parallel/psParallelCompact.cpp:

1690 // may be true because this method can be called without intervening
1691 // activity.  For example, when the heap space is tight and full measures
1692 // are being taken to free space.
1693 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
1694   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1695   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
1696          "should be in vm thread");
1697 
1698   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
1699   GCCause::Cause gc_cause = heap->gc_cause();
1700   assert(!heap->is_gc_active(), "not reentrant");
1701 
1702   PSAdaptiveSizePolicy* policy = heap->size_policy();
1703   IsGCActiveMark mark;
1704 
1705   if (ScavengeBeforeFullGC) {
1706     PSScavenge::invoke_no_policy();
1707   }
1708 
1709   const bool clear_all_soft_refs =
1710     heap->collector_policy()->should_clear_all_soft_refs();
1711 
1712   PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
1713                                       maximum_heap_compaction);
1714 }
1715 
1716 // This method contains no policy. You should probably
1717 // be calling invoke() instead.
1718 bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
1719   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
1720   assert(ref_processor() != NULL, "Sanity");
1721 
1722   if (GCLocker::check_active_before_gc()) {
1723     return false;
1724   }
1725 
1726   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
1727 
1728   GCIdMark gc_id_mark;
1729   _gc_timer.register_gc_start();
1730   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
1731 
1732   TimeStamp marking_start;
1733   TimeStamp compaction_start;
1734   TimeStamp collection_exit;
1735 
1736   GCCause::Cause gc_cause = heap->gc_cause();
1737   PSYoungGen* young_gen = heap->young_gen();
1738   PSOldGen* old_gen = heap->old_gen();
1739   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
1740 
1741   // The scope of casr should end after code that can change
1742   // CollectorPolicy::_should_clear_all_soft_refs.
1743   ClearedAllSoftRefs casr(maximum_heap_compaction,
1744                           heap->collector_policy());
1745 
1746   if (ZapUnusedHeapArea) {
1747     // Save information needed to minimize mangling
1748     heap->record_gen_tops_before_GC();
1749   }
1750 
1751   // Make sure data structures are sane, make the heap parsable, and do other
1752   // miscellaneous bookkeeping.
1753   pre_compact();
1754 
1755   PreGCValues pre_gc_values(heap);
1756 
1757   // Get the compaction manager reserved for the VM thread.
1758   ParCompactionManager* const vmthread_cm =
1759     ParCompactionManager::manager_array(gc_task_manager()->workers());
1760 
1761   {
1762     ResourceMark rm;
1763     HandleMark hm;
1764 


1850           young_gen->from_space()->capacity_in_bytes() -
1851           young_gen->to_space()->capacity_in_bytes();
1852 
1853         // Used for diagnostics
1854         size_policy->clear_generation_free_space_flags();
1855 
1856         size_policy->compute_generations_free_space(young_live,
1857                                                     eden_live,
1858                                                     old_live,
1859                                                     cur_eden,
1860                                                     max_old_gen_size,
1861                                                     max_eden_size,
1862                                                     true /* full gc*/);
1863 
1864         size_policy->check_gc_overhead_limit(young_live,
1865                                              eden_live,
1866                                              max_old_gen_size,
1867                                              max_eden_size,
1868                                              true /* full gc*/,
1869                                              gc_cause,
1870                                              heap->collector_policy());
1871 
1872         size_policy->decay_supplemental_growth(true /* full gc*/);
1873 
1874         heap->resize_old_gen(
1875           size_policy->calculated_old_free_size_in_bytes());
1876 
1877         heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
1878                                size_policy->calculated_survivor_size_in_bytes());
1879       }
1880 
1881       log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
1882     }
1883 
1884     if (UsePerfData) {
1885       PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
1886       counters->update_counters();
1887       counters->update_old_capacity(old_gen->capacity_in_bytes());
1888       counters->update_young_capacity(young_gen->capacity_in_bytes());
1889     }
1890 


new/src/share/vm/gc/parallel/psParallelCompact.cpp:

1690 // may be true because this method can be called without intervening
1691 // activity.  For example, when the heap space is tight and full measures
1692 // are being taken to free space.
1693 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
1694   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1695   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
1696          "should be in vm thread");
1697 
1698   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
1699   GCCause::Cause gc_cause = heap->gc_cause();
1700   assert(!heap->is_gc_active(), "not reentrant");
1701 
1702   PSAdaptiveSizePolicy* policy = heap->size_policy();
1703   IsGCActiveMark mark;
1704 
1705   if (ScavengeBeforeFullGC) {
1706     PSScavenge::invoke_no_policy();
1707   }
1708 
1709   const bool clear_all_soft_refs =
1710     heap->collector_policy()->as_generation_policy()->should_clear_all_soft_refs();
1711 
1712   PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
1713                                       maximum_heap_compaction);
1714 }
1715 
1716 // This method contains no policy. You should probably
1717 // be calling invoke() instead.
1718 bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
1719   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
1720   assert(ref_processor() != NULL, "Sanity");
1721 
1722   if (GCLocker::check_active_before_gc()) {
1723     return false;
1724   }
1725 
1726   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
1727 
1728   GCIdMark gc_id_mark;
1729   _gc_timer.register_gc_start();
1730   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
1731 
1732   TimeStamp marking_start;
1733   TimeStamp compaction_start;
1734   TimeStamp collection_exit;
1735 
1736   GCCause::Cause gc_cause = heap->gc_cause();
1737   PSYoungGen* young_gen = heap->young_gen();
1738   PSOldGen* old_gen = heap->old_gen();
1739   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
1740 
1741   // The scope of casr should end after code that can change
1742   // CollectorPolicy::_should_clear_all_soft_refs.
1743   ClearedAllSoftRefs casr(maximum_heap_compaction,
1744                           heap->collector_policy()->as_generation_policy());
1745 
1746   if (ZapUnusedHeapArea) {
1747     // Save information needed to minimize mangling
1748     heap->record_gen_tops_before_GC();
1749   }
1750 
1751   // Make sure data structures are sane, make the heap parsable, and do other
1752   // miscellaneous bookkeeping.
1753   pre_compact();
1754 
1755   PreGCValues pre_gc_values(heap);
1756 
1757   // Get the compaction manager reserved for the VM thread.
1758   ParCompactionManager* const vmthread_cm =
1759     ParCompactionManager::manager_array(gc_task_manager()->workers());
1760 
1761   {
1762     ResourceMark rm;
1763     HandleMark hm;
1764 


1850           young_gen->from_space()->capacity_in_bytes() -
1851           young_gen->to_space()->capacity_in_bytes();
1852 
1853         // Used for diagnostics
1854         size_policy->clear_generation_free_space_flags();
1855 
1856         size_policy->compute_generations_free_space(young_live,
1857                                                     eden_live,
1858                                                     old_live,
1859                                                     cur_eden,
1860                                                     max_old_gen_size,
1861                                                     max_eden_size,
1862                                                     true /* full gc*/);
1863 
1864         size_policy->check_gc_overhead_limit(young_live,
1865                                              eden_live,
1866                                              max_old_gen_size,
1867                                              max_eden_size,
1868                                              true /* full gc*/,
1869                                              gc_cause,
1870                                              heap->collector_policy()->as_generation_policy());
1871 
1872         size_policy->decay_supplemental_growth(true /* full gc*/);
1873 
1874         heap->resize_old_gen(
1875           size_policy->calculated_old_free_size_in_bytes());
1876 
1877         heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
1878                                size_policy->calculated_survivor_size_in_bytes());
1879       }
1880 
1881       log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
1882     }
1883 
1884     if (UsePerfData) {
1885       PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
1886       counters->update_counters();
1887       counters->update_old_capacity(old_gen->capacity_in_bytes());
1888       counters->update_young_capacity(young_gen->capacity_in_bytes());
1889     }
1890 


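The casr stack object in invoke_no_policy() follows HotSpot's scope-guard idiom: it captures at construction whether this collection will clear all soft references and reports that to the policy when the scope exits, on every return path. A minimal sketch of that idiom, assuming a cleared_all_soft_refs() notification on the policy (the callback name and the policy stub are assumptions; the real helper is defined elsewhere in the GC code and may differ):

// Minimal policy stub for this sketch only.
class GenCollectorPolicy {
 public:
  GenCollectorPolicy() : _all_soft_refs_clear(false) {}
  void cleared_all_soft_refs() { _all_soft_refs_clear = true; }
 private:
  bool _all_soft_refs_clear;
};

// Scope guard: constructed near the top of the collection, its destructor
// runs when the enclosing scope ends, so the policy is updated even if the
// collection bails out early.
class ClearedAllSoftRefs {
 public:
  ClearedAllSoftRefs(bool clear_all_soft_refs, GenCollectorPolicy* policy)
      : _clear_all_soft_refs(clear_all_soft_refs), _policy(policy) {}

  ~ClearedAllSoftRefs() {
    if (_clear_all_soft_refs) {
      _policy->cleared_all_soft_refs();
    }
  }

 private:
  bool _clear_all_soft_refs;
  GenCollectorPolicy* _policy;
};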