src/hotspot/share/gc/parallel/psParallelCompact.cpp

--- old

// may be true because this method can be called without intervening
// activity.  For example, when the heap space is tight and full measures
// are being taken to free space.
void PSParallelCompact::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(),
         "should be in vm thread");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(!heap->is_gc_active(), "not reentrant");

  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();

  PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
                                      maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GCLocker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  GCIdMark gc_id_mark;
  _gc_timer.register_gc_start();
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  TimeStamp marking_start;
  TimeStamp compaction_start;
  TimeStamp collection_exit;

  GCCause::Cause gc_cause = heap->gc_cause();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(maximum_heap_compaction,
                          heap->collector_policy());

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  // Make sure data structures are sane, make the heap parsable, and do other
  // miscellaneous bookkeeping.
  pre_compact();

  PreGCValues pre_gc_values(heap);

  // Get the compaction manager reserved for the VM thread.
  ParCompactionManager* const vmthread_cm =
    ParCompactionManager::manager_array(gc_task_manager()->workers());

  {
    ResourceMark rm;
    HandleMark hm;

    // ... (source lines 1765-1851 not shown in this view) ...
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc*/);

        size_policy->check_gc_overhead_limit(young_live,
                                             eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc*/,
                                             gc_cause,
                                             heap->collector_policy());

        size_policy->decay_supplemental_growth(true /* full gc*/);

        heap->resize_old_gen(
          size_policy->calculated_old_free_size_in_bytes());

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());
      }

      log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
    }

    if (UsePerfData) {
      PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
      counters->update_counters();
      counters->update_old_capacity(old_gen->capacity_in_bytes());
      counters->update_young_capacity(young_gen->capacity_in_bytes());
    }

    // ... (remainder not shown) ...

+++ new

The new version below is identical except that the soft-reference
bookkeeping now goes through the dedicated heap->soft_ref_policy()
accessor instead of heap->collector_policy().

// may be true because this method can be called without intervening
// activity.  For example, when the heap space is tight and full measures
// are being taken to free space.
void PSParallelCompact::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(),
         "should be in vm thread");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(!heap->is_gc_active(), "not reentrant");

  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->soft_ref_policy()->should_clear_all_soft_refs();

  PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
                                      maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GCLocker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  GCIdMark gc_id_mark;
  _gc_timer.register_gc_start();
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  TimeStamp marking_start;
  TimeStamp compaction_start;
  TimeStamp collection_exit;

  GCCause::Cause gc_cause = heap->gc_cause();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // SoftRefPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(maximum_heap_compaction,
                          heap->soft_ref_policy());

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  // Make sure data structures are sane, make the heap parsable, and do other
  // miscellaneous bookkeeping.
  pre_compact();

  PreGCValues pre_gc_values(heap);

  // Get the compaction manager reserved for the VM thread.
  ParCompactionManager* const vmthread_cm =
    ParCompactionManager::manager_array(gc_task_manager()->workers());

  {
    ResourceMark rm;
    HandleMark hm;

    // ... (source lines 1765-1851 not shown in this view) ...
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc*/);

        size_policy->check_gc_overhead_limit(young_live,
                                             eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc*/,
                                             gc_cause,
                                             heap->soft_ref_policy());

        size_policy->decay_supplemental_growth(true /* full gc*/);

        heap->resize_old_gen(
          size_policy->calculated_old_free_size_in_bytes());

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());
      }

      log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
    }

    if (UsePerfData) {
      PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
      counters->update_counters();
      counters->update_old_capacity(old_gen->capacity_in_bytes());
      counters->update_young_capacity(young_gen->capacity_in_bytes());
    }

    // ... (remainder not shown) ...
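
For context, the change routes the soft-reference clearing decision through a
dedicated SoftRefPolicy object instead of the general CollectorPolicy:
invoke() reads should_clear_all_soft_refs() from heap->soft_ref_policy(), and
the ClearedAllSoftRefs guard and check_gc_overhead_limit() now receive that
same object. Below is a minimal, self-contained sketch of the pattern; the
class bodies and the main() driver are illustrative stand-ins, not the actual
HotSpot declarations, and only the accessor names visible in the diff are
taken from the source.

// Illustrative sketch only: simplified stand-ins for SoftRefPolicy and the
// ClearedAllSoftRefs guard; compiles on its own.
#include <cstdio>

class SoftRefPolicy {
  bool _should_clear_all_soft_refs;  // request: clear all soft refs in the next full GC
  bool _all_soft_refs_clear;         // record: the last collection did clear them
public:
  SoftRefPolicy() : _should_clear_all_soft_refs(false), _all_soft_refs_clear(false) {}
  bool should_clear_all_soft_refs() const { return _should_clear_all_soft_refs; }
  void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; }
  bool all_soft_refs_clear() const { return _all_soft_refs_clear; }
  void set_all_soft_refs_clear(bool v) { _all_soft_refs_clear = v; }
};

// RAII guard in the style of ClearedAllSoftRefs: if this collection was asked
// to clear all soft refs, record that on the policy when the guard goes out
// of scope, even if the collection code returns early.
class ClearedAllSoftRefsSketch {
  bool _clear_all_soft_refs;
  SoftRefPolicy* _soft_ref_policy;
public:
  ClearedAllSoftRefsSketch(bool clear_all_soft_refs, SoftRefPolicy* policy)
    : _clear_all_soft_refs(clear_all_soft_refs), _soft_ref_policy(policy) {}
  ~ClearedAllSoftRefsSketch() {
    if (_clear_all_soft_refs) {
      _soft_ref_policy->set_all_soft_refs_clear(true);
    }
  }
};

int main() {
  SoftRefPolicy policy;
  policy.set_should_clear_all_soft_refs(true);  // e.g. set when the heap is nearly full

  {
    // Mirrors invoke(): combine the policy's standing request with the
    // caller's maximum_heap_compaction flag.
    bool maximum_heap_compaction = false;
    bool clear_all = policy.should_clear_all_soft_refs() || maximum_heap_compaction;
    ClearedAllSoftRefsSketch casr(clear_all, &policy);
    // ... full collection would run here ...
  }  // casr destructor records the outcome on the policy

  std::printf("all_soft_refs_clear = %d\n", policy.all_soft_refs_clear() ? 1 : 0);
  return 0;
}

The point of the guard is the comment retained in the diff: the scope of casr
must outlive any code that can still change _should_clear_all_soft_refs, so
the outcome recorded at destruction matches what the collection actually did.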

