src/hotspot/share/gc/parallel/psParallelCompact.cpp

  34 #include "gc/parallel/pcTasks.hpp"
  35 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
  36 #include "gc/parallel/psCompactionManager.inline.hpp"
  37 #include "gc/parallel/psMarkSweep.hpp"
  38 #include "gc/parallel/psMarkSweepDecorator.hpp"
  39 #include "gc/parallel/psOldGen.hpp"
  40 #include "gc/parallel/psParallelCompact.inline.hpp"
  41 #include "gc/parallel/psPromotionManager.inline.hpp"
  42 #include "gc/parallel/psScavenge.hpp"
  43 #include "gc/parallel/psYoungGen.hpp"
  44 #include "gc/shared/gcCause.hpp"
  45 #include "gc/shared/gcHeapSummary.hpp"
  46 #include "gc/shared/gcId.hpp"
  47 #include "gc/shared/gcLocker.inline.hpp"
  48 #include "gc/shared/gcTimer.hpp"
  49 #include "gc/shared/gcTrace.hpp"
  50 #include "gc/shared/gcTraceTime.inline.hpp"
  51 #include "gc/shared/isGCActiveMark.hpp"
  52 #include "gc/shared/referencePolicy.hpp"
  53 #include "gc/shared/referenceProcessor.hpp"

  54 #include "gc/shared/spaceDecorator.hpp"
  55 #include "gc/shared/weakProcessor.hpp"
  56 #include "logging/log.hpp"
  57 #include "memory/resourceArea.hpp"
  58 #include "oops/instanceKlass.inline.hpp"
  59 #include "oops/instanceMirrorKlass.inline.hpp"
  60 #include "oops/methodData.hpp"
  61 #include "oops/objArrayKlass.inline.hpp"
  62 #include "oops/oop.inline.hpp"
  63 #include "runtime/atomic.hpp"
  64 #include "runtime/safepoint.hpp"
  65 #include "runtime/vmThread.hpp"
  66 #include "services/management.hpp"
  67 #include "services/memTracker.hpp"
  68 #include "services/memoryService.hpp"
  69 #include "utilities/align.hpp"
  70 #include "utilities/debug.hpp"
  71 #include "utilities/events.hpp"
  72 #include "utilities/formatBuffer.hpp"
  73 #include "utilities/stack.inline.hpp"


1690 // may be true because this method can be called without intervening
1691 // activity.  For example, when the heap space is tight and full measures
1692 // are being taken to free space.
1693 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
1694   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1695   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
1696          "should be in vm thread");
1697 
1698   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
1699   GCCause::Cause gc_cause = heap->gc_cause();
1700   assert(!heap->is_gc_active(), "not reentrant");
1701 
1702   PSAdaptiveSizePolicy* policy = heap->size_policy();
1703   IsGCActiveMark mark;
1704 
1705   if (ScavengeBeforeFullGC) {
1706     PSScavenge::invoke_no_policy();
1707   }
1708 
1709   const bool clear_all_soft_refs =
1710     heap->collector_policy()->should_clear_all_soft_refs();
1711 
1712   PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
1713                                       maximum_heap_compaction);
1714 }
1715 
1716 // This method contains no policy. You should probably
1717 // be calling invoke() instead.
1718 bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
1719   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
1720   assert(ref_processor() != NULL, "Sanity");
1721 
1722   if (GCLocker::check_active_before_gc()) {
1723     return false;
1724   }
1725 
1726   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
1727 
1728   GCIdMark gc_id_mark;
1729   _gc_timer.register_gc_start();
1730   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
1731 
1732   TimeStamp marking_start;
1733   TimeStamp compaction_start;
1734   TimeStamp collection_exit;
1735 
1736   GCCause::Cause gc_cause = heap->gc_cause();
1737   PSYoungGen* young_gen = heap->young_gen();
1738   PSOldGen* old_gen = heap->old_gen();
1739   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
1740 
1741   // The scope of casr should end after code that can change
1742   // CollectorPolicy::_should_clear_all_soft_refs.
1743   ClearedAllSoftRefs casr(maximum_heap_compaction,
1744                           heap->collector_policy());
1745 
1746   if (ZapUnusedHeapArea) {
1747     // Save information needed to minimize mangling
1748     heap->record_gen_tops_before_GC();
1749   }
1750 
1751   // Make sure data structures are sane, make the heap parsable, and do other
1752   // miscellaneous bookkeeping.
1753   pre_compact();
1754 
1755   PreGCValues pre_gc_values(heap);
1756 
1757   // Get the compaction manager reserved for the VM thread.
1758   ParCompactionManager* const vmthread_cm =
1759     ParCompactionManager::manager_array(gc_task_manager()->workers());
1760 
1761   {
1762     ResourceMark rm;
1763     HandleMark hm;
1764 


1852           young_gen->from_space()->capacity_in_bytes() -
1853           young_gen->to_space()->capacity_in_bytes();
1854 
1855         // Used for diagnostics
1856         size_policy->clear_generation_free_space_flags();
1857 
1858         size_policy->compute_generations_free_space(young_live,
1859                                                     eden_live,
1860                                                     old_live,
1861                                                     cur_eden,
1862                                                     max_old_gen_size,
1863                                                     max_eden_size,
1864                                                     true /* full gc*/);
1865 
1866         size_policy->check_gc_overhead_limit(young_live,
1867                                              eden_live,
1868                                              max_old_gen_size,
1869                                              max_eden_size,
1870                                              true /* full gc*/,
1871                                              gc_cause,
1872                                              heap->collector_policy());
1873 
1874         size_policy->decay_supplemental_growth(true /* full gc*/);
1875 
1876         heap->resize_old_gen(
1877           size_policy->calculated_old_free_size_in_bytes());
1878 
1879         heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
1880                                size_policy->calculated_survivor_size_in_bytes());
1881       }
1882 
1883       log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
1884     }
1885 
1886     if (UsePerfData) {
1887       PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
1888       counters->update_counters();
1889       counters->update_old_capacity(old_gen->capacity_in_bytes());
1890       counters->update_young_capacity(young_gen->capacity_in_bytes());
1891     }
1892 




  34 #include "gc/parallel/pcTasks.hpp"
  35 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
  36 #include "gc/parallel/psCompactionManager.inline.hpp"
  37 #include "gc/parallel/psMarkSweep.hpp"
  38 #include "gc/parallel/psMarkSweepDecorator.hpp"
  39 #include "gc/parallel/psOldGen.hpp"
  40 #include "gc/parallel/psParallelCompact.inline.hpp"
  41 #include "gc/parallel/psPromotionManager.inline.hpp"
  42 #include "gc/parallel/psScavenge.hpp"
  43 #include "gc/parallel/psYoungGen.hpp"
  44 #include "gc/shared/gcCause.hpp"
  45 #include "gc/shared/gcHeapSummary.hpp"
  46 #include "gc/shared/gcId.hpp"
  47 #include "gc/shared/gcLocker.inline.hpp"
  48 #include "gc/shared/gcTimer.hpp"
  49 #include "gc/shared/gcTrace.hpp"
  50 #include "gc/shared/gcTraceTime.inline.hpp"
  51 #include "gc/shared/isGCActiveMark.hpp"
  52 #include "gc/shared/referencePolicy.hpp"
  53 #include "gc/shared/referenceProcessor.hpp"
  54 #include "gc/shared/softRefPolicy.hpp"
  55 #include "gc/shared/spaceDecorator.hpp"
  56 #include "gc/shared/weakProcessor.hpp"
  57 #include "logging/log.hpp"
  58 #include "memory/resourceArea.hpp"
  59 #include "oops/instanceKlass.inline.hpp"
  60 #include "oops/instanceMirrorKlass.inline.hpp"
  61 #include "oops/methodData.hpp"
  62 #include "oops/objArrayKlass.inline.hpp"
  63 #include "oops/oop.inline.hpp"
  64 #include "runtime/atomic.hpp"
  65 #include "runtime/safepoint.hpp"
  66 #include "runtime/vmThread.hpp"
  67 #include "services/management.hpp"
  68 #include "services/memTracker.hpp"
  69 #include "services/memoryService.hpp"
  70 #include "utilities/align.hpp"
  71 #include "utilities/debug.hpp"
  72 #include "utilities/events.hpp"
  73 #include "utilities/formatBuffer.hpp"
  74 #include "utilities/stack.inline.hpp"


1691 // may be true because this method can be called without intervening
1692 // activity.  For example, when the heap space is tight and full measures
1693 // are being taken to free space.
1694 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
1695   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1696   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
1697          "should be in vm thread");
1698 
1699   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
1700   GCCause::Cause gc_cause = heap->gc_cause();
1701   assert(!heap->is_gc_active(), "not reentrant");
1702 
1703   PSAdaptiveSizePolicy* policy = heap->size_policy();
1704   IsGCActiveMark mark;
1705 
1706   if (ScavengeBeforeFullGC) {
1707     PSScavenge::invoke_no_policy();
1708   }
1709 
1710   const bool clear_all_soft_refs =
1711     heap->soft_ref_policy()->should_clear_all_soft_refs();
1712 
1713   PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
1714                                       maximum_heap_compaction);
1715 }
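
The only functional change in invoke() above is that the clear-all-soft-refs decision is now read from the heap's SoftRefPolicy rather than from CollectorPolicy. A minimal sketch of the policy surface this call site relies on follows; should_clear_all_soft_refs() is the only member actually called in the hunk, and the backing field and setter are assumptions added so the sketch is self-contained.

// Sketch only: the minimal policy surface invoke() relies on after this change.
// should_clear_all_soft_refs() is the one member called above; the backing
// field and setter below are assumed for illustration, not HotSpot code.
class SoftRefPolicySketch {
  bool _should_clear_all_soft_refs = false;
 public:
  bool should_clear_all_soft_refs() const { return _should_clear_all_soft_refs; }
  void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; }
};

// With such a policy in hand, the call above reduces to:
//   invoke_no_policy(policy->should_clear_all_soft_refs() || maximum_heap_compaction);
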
1716 
1717 // This method contains no policy. You should probably
1718 // be calling invoke() instead.
1719 bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
1720   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
1721   assert(ref_processor() != NULL, "Sanity");
1722 
1723   if (GCLocker::check_active_before_gc()) {
1724     return false;
1725   }
1726 
1727   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
1728 
1729   GCIdMark gc_id_mark;
1730   _gc_timer.register_gc_start();
1731   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
1732 
1733   TimeStamp marking_start;
1734   TimeStamp compaction_start;
1735   TimeStamp collection_exit;
1736 
1737   GCCause::Cause gc_cause = heap->gc_cause();
1738   PSYoungGen* young_gen = heap->young_gen();
1739   PSOldGen* old_gen = heap->old_gen();
1740   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
1741 
1742   // The scope of casr should end after code that can change
1743   // SoftRefPolicy::_should_clear_all_soft_refs.
1744   ClearedAllSoftRefs casr(maximum_heap_compaction,
1745                           heap->soft_ref_policy());
1746 
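
The comment above asks that casr stay in scope until all code that can change SoftRefPolicy::_should_clear_all_soft_refs has run, which is the usual RAII scope-guard idiom. A self-contained sketch of that idiom follows; only the constructor arguments (the request flag and a policy pointer) come from the hunk, while the policy type and the destructor call here are assumptions for illustration.

// Self-contained sketch of the scope-guard idiom behind casr; names other than
// the constructor arguments are illustrative, not HotSpot code.
struct SoftRefPolicyLike {
  bool all_soft_refs_clear = false;
  void record_cleared_all_soft_refs() { all_soft_refs_clear = true; }
};

class ClearedAllSoftRefsGuard {
  bool _clear_all_soft_refs;
  SoftRefPolicyLike* _policy;
 public:
  ClearedAllSoftRefsGuard(bool clear_all_soft_refs, SoftRefPolicyLike* policy)
    : _clear_all_soft_refs(clear_all_soft_refs), _policy(policy) {}
  // When the scope that performed the collection ends, record in the policy
  // whether this cycle was asked to clear every soft reference.
  ~ClearedAllSoftRefsGuard() {
    if (_clear_all_soft_refs) {
      _policy->record_cleared_all_soft_refs();
    }
  }
};
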
1747   if (ZapUnusedHeapArea) {
1748     // Save information needed to minimize mangling
1749     heap->record_gen_tops_before_GC();
1750   }
1751 
1752   // Make sure data structures are sane, make the heap parsable, and do other
1753   // miscellaneous bookkeeping.
1754   pre_compact();
1755 
1756   PreGCValues pre_gc_values(heap);
1757 
1758   // Get the compaction manager reserved for the VM thread.
1759   ParCompactionManager* const vmthread_cm =
1760     ParCompactionManager::manager_array(gc_task_manager()->workers());
1761 
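
The lookup above fetches the compaction manager at index gc_task_manager()->workers(), i.e. one slot past the per-worker managers, which the comment says is reserved for the VM thread. A small sketch of that layout follows; the container and helpers are assumptions for illustration, not the HotSpot data structure.

// Illustrative layout: one manager per GC worker plus a trailing slot used by
// the VM thread, mirroring manager_array(gc_task_manager()->workers()).
#include <vector>

struct CompactionManagerSketch { /* per-thread marking/compaction state */ };

std::vector<CompactionManagerSketch> make_managers(unsigned workers) {
  return std::vector<CompactionManagerSketch>(workers + 1);  // last slot: VM thread
}

CompactionManagerSketch* vm_thread_manager(std::vector<CompactionManagerSketch>& all,
                                           unsigned workers) {
  return &all[workers];
}
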
1762   {
1763     ResourceMark rm;
1764     HandleMark hm;
1765 


1853           young_gen->from_space()->capacity_in_bytes() -
1854           young_gen->to_space()->capacity_in_bytes();
1855 
1856         // Used for diagnostics
1857         size_policy->clear_generation_free_space_flags();
1858 
1859         size_policy->compute_generations_free_space(young_live,
1860                                                     eden_live,
1861                                                     old_live,
1862                                                     cur_eden,
1863                                                     max_old_gen_size,
1864                                                     max_eden_size,
1865                                                     true /* full gc*/);
1866 
1867         size_policy->check_gc_overhead_limit(young_live,
1868                                              eden_live,
1869                                              max_old_gen_size,
1870                                              max_eden_size,
1871                                              true /* full gc*/,
1872                                              gc_cause,
1873                                              heap->soft_ref_policy());
1874 
1875         size_policy->decay_supplemental_growth(true /* full gc*/);
1876 
1877         heap->resize_old_gen(
1878           size_policy->calculated_old_free_size_in_bytes());
1879 
1880         heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
1881                                size_policy->calculated_survivor_size_in_bytes());
1882       }
1883 
1884       log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
1885     }
1886 
1887     if (UsePerfData) {
1888       PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
1889       counters->update_counters();
1890       counters->update_old_capacity(old_gen->capacity_in_bytes());
1891       counters->update_young_capacity(young_gen->capacity_in_bytes());
1892     }
1893 

