src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

  31 #include "gc/g1/g1CollectorState.hpp"
  32 #include "gc/g1/g1ConcurrentMark.inline.hpp"
  33 #include "gc/g1/g1HeapVerifier.hpp"
  34 #include "gc/g1/g1OopClosures.inline.hpp"
  35 #include "gc/g1/g1CardLiveData.inline.hpp"
  36 #include "gc/g1/g1Policy.hpp"
  37 #include "gc/g1/g1StringDedup.hpp"
  38 #include "gc/g1/heapRegion.inline.hpp"
  39 #include "gc/g1/heapRegionRemSet.hpp"
  40 #include "gc/g1/heapRegionSet.inline.hpp"
  41 #include "gc/g1/suspendibleThreadSet.hpp"
  42 #include "gc/shared/gcId.hpp"
  43 #include "gc/shared/gcTimer.hpp"
  44 #include "gc/shared/gcTrace.hpp"
  45 #include "gc/shared/gcTraceTime.inline.hpp"
  46 #include "gc/shared/genOopClosures.inline.hpp"
  47 #include "gc/shared/referencePolicy.hpp"
  48 #include "gc/shared/strongRootsScope.hpp"
  49 #include "gc/shared/taskqueue.inline.hpp"
  50 #include "gc/shared/vmGCOperations.hpp"
  51 #include "logging/log.hpp"
  52 #include "memory/allocation.hpp"
  53 #include "memory/resourceArea.hpp"
  54 #include "oops/oop.inline.hpp"
  55 #include "runtime/atomic.hpp"
  56 #include "runtime/handles.inline.hpp"
  57 #include "runtime/java.hpp"
  58 #include "runtime/prefetch.inline.hpp"
  59 #include "services/memTracker.hpp"
  60 #include "utilities/align.hpp"
  61 #include "utilities/growableArray.hpp"
  62 
  63 bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  64   assert(addr < _cm->finger(), "invariant");
  65   assert(addr >= _task->finger(), "invariant");
  66 
  67   // We move that task's local finger along.
  68   _task->move_finger_to(addr);
  69 
  70   _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
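
The two asserts above bracket every marked address into the half-open window [task finger, global finger): the global concurrent-mark finger bounds it from above, the task-local finger from below, and the task then advances its local finger to the address it is about to scan. A standalone sketch of that invariant, using plain char pointers in place of HeapWord* and purely hypothetical offsets:

#include <cassert>

struct Task {
  const char* finger;                      // stand-in for the task-local finger
  void move_finger_to(const char* addr) { finger = addr; }
};

int main() {
  const char heap[16] = {};
  const char* global_finger = heap + 12;   // plays the role of _cm->finger()
  Task task = { heap + 4 };                // plays the role of _task->finger()

  const char* addr = heap + 7;             // a set bit found in the mark bitmap
  // do_addr()'s invariants: task finger <= addr < global finger.
  assert(addr >= task.finger && addr < global_finger);
  task.move_finger_to(addr);               // local finger advances to addr
  assert(task.finger == addr);
  return 0;
}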


1586 
1587 void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
1588   if (has_overflown()) {
1589     // Skip processing the discovered references if we have
1590     // overflown the global marking stack. Reference objects
1591     // only get discovered once, so it is OK not to
1592     // de-populate the discovered reference lists. We could have,
1593     // but the only benefit would be that, when marking restarts,
1594     // fewer reference objects are discovered.
1595     return;
1596   }
1597 
1598   ResourceMark rm;
1599   HandleMark   hm;
1600 
1601   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1602 
1603   // Is alive closure.
1604   G1CMIsAliveClosure g1_is_alive(g1h);
1605 
1606   // Inner scope to exclude the cleaning of the string and symbol
1607   // tables from the displayed time.
1608   {
1609     GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm);
1610 
1611     ReferenceProcessor* rp = g1h->ref_processor_cm();
1612 
1613     // See the comment in G1CollectedHeap::ref_processing_init()
1614     // about how reference processing currently works in G1.
1615 
1616     // Set the soft reference policy
1617     rp->setup_policy(clear_all_soft_refs);
1618     assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1619 
1620     // Instances of the 'Keep Alive' and 'Complete GC' closures used
1621     // in serial reference processing. Note these closures are also
1622     // used for serially processing (by the current thread) the
1623     // JNI references during parallel reference processing.
1624     //
1625     // These closures do not need to synchronize with the worker
1626     // threads involved in parallel reference processing as these
1627     // instances are executed serially by the current thread (i.e.
1628     // reference processing is not multi-threaded and is thus
1629     // performed by the current thread instead of a gang worker).
1630     //
1631     // The gang tasks involved in parallel reference processing create
1632     // their own instances of these closures, which do their own
1633     // synchronization among themselves.
1634     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1635     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1636 
1637     // We need at least one active thread. If reference processing
1638     // is not multi-threaded we use the current (VMThread) thread,
1639     // otherwise we use the work gang from the G1CollectedHeap and
1640     // we utilize all the worker threads we can.
1641     bool processing_is_mt = rp->processing_is_mt();
1642     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
1643     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
1644 
1645     // Parallel processing task executor.
1646     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
1647                                               g1h->workers(), active_workers);
1648     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1649 
1650     // Set the concurrency level. The phase was already set prior to
1651     // executing the remark task.
1652     set_concurrency(active_workers);
1653 
1654     // Set the degree of MT processing here.  If the discovery was done MT,
1655     // the number of threads involved during discovery could differ from
1656     // the number of active workers.  This is OK as long as the discovered


1668                                           &pt);
1669     _gc_tracer_cm->report_gc_reference_stats(stats);
1670     pt.print_all_references();
1671 
1672     // The do_oop work routines of the keep_alive and drain_marking_stack
1673     // oop closures will set the has_overflown flag if we overflow the
1674     // global marking stack.
1675 
1676     assert(has_overflown() || _global_mark_stack.is_empty(),
1677             "Mark stack should be empty (unless it has overflown)");
1678 
1679     assert(rp->num_q() == active_workers, "why not");
1680 
1681     rp->enqueue_discovered_references(executor, &pt);
1682 
1683     rp->verify_no_references_recorded();
1684 
1685     pt.print_enqueue_phase();
1686 
1687     assert(!rp->discovery_enabled(), "Post condition");





1688   }
1689 
1690   if (has_overflown()) {
1691     // We cannot trust g1_is_alive if the marking stack overflowed
1692     return;
1693   }
1694 
1695   assert(_global_mark_stack.is_empty(), "Marking should have completed");
1696 
1697   // Unload Klasses, String, Symbols, Code Cache, etc.
1698   if (ClassUnloadingWithConcurrentMark) {
1699     GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1700     bool purged_classes = SystemDictionary::do_unloading(&g1_is_alive, _gc_timer_cm, false /* Defer cleaning */);
1701     g1h->complete_cleaning(&g1_is_alive, purged_classes);
1702   } else {
1703     GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
1704     // No need to clean string table and symbol table as they are treated as strong roots when
1705     // class unloading is disabled.
1706     g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled());
1707 
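
The excerpt below is the updated version of the file. Its substantive change is twofold: the serial g1_keep_alive and g1_drain_mark_stack closures move from the reference-processing scope up to function scope, and a new timed "Weak Processing" phase passes them, together with g1_is_alive, to WeakProcessor::unlink_or_oops_do (hence the new gc/shared/weakProcessor.hpp include). A self-contained sketch of that control-flow change, with empty stand-in types in place of the real G1 closures and a stub in place of the WeakProcessor entry point:

#include <cstdio>

// Stand-ins for the real closure types; only the shape matters here.
struct IsAlive    {};
struct KeepAlive  {};
struct DrainStack {};

// Stub standing in for the entry point declared in gc/shared/weakProcessor.hpp.
static void weak_processor_unlink_or_oops_do(IsAlive*, KeepAlive*, DrainStack*) {
  std::puts("Weak Processing");
}

static void weak_refs_work() {
  IsAlive    is_alive;
  // Before the change these two were constructed inside the
  // reference-processing scope below; hoisting them to function
  // scope lets the weak-root phase reuse the same instances.
  KeepAlive  keep_alive;
  DrainStack drain_mark_stack;

  {
    std::puts("Reference Processing");     // first timed phase (unchanged)
  }
  {
    // New second timed phase introduced by this change.
    weak_processor_unlink_or_oops_do(&is_alive, &keep_alive, &drain_mark_stack);
  }
}

int main() {
  weak_refs_work();
  return 0;
}
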
  31 #include "gc/g1/g1CollectorState.hpp"
  32 #include "gc/g1/g1ConcurrentMark.inline.hpp"
  33 #include "gc/g1/g1HeapVerifier.hpp"
  34 #include "gc/g1/g1OopClosures.inline.hpp"
  35 #include "gc/g1/g1CardLiveData.inline.hpp"
  36 #include "gc/g1/g1Policy.hpp"
  37 #include "gc/g1/g1StringDedup.hpp"
  38 #include "gc/g1/heapRegion.inline.hpp"
  39 #include "gc/g1/heapRegionRemSet.hpp"
  40 #include "gc/g1/heapRegionSet.inline.hpp"
  41 #include "gc/g1/suspendibleThreadSet.hpp"
  42 #include "gc/shared/gcId.hpp"
  43 #include "gc/shared/gcTimer.hpp"
  44 #include "gc/shared/gcTrace.hpp"
  45 #include "gc/shared/gcTraceTime.inline.hpp"
  46 #include "gc/shared/genOopClosures.inline.hpp"
  47 #include "gc/shared/referencePolicy.hpp"
  48 #include "gc/shared/strongRootsScope.hpp"
  49 #include "gc/shared/taskqueue.inline.hpp"
  50 #include "gc/shared/vmGCOperations.hpp"
  51 #include "gc/shared/weakProcessor.hpp"
  52 #include "logging/log.hpp"
  53 #include "memory/allocation.hpp"
  54 #include "memory/resourceArea.hpp"
  55 #include "oops/oop.inline.hpp"
  56 #include "runtime/atomic.hpp"
  57 #include "runtime/handles.inline.hpp"
  58 #include "runtime/java.hpp"
  59 #include "runtime/prefetch.inline.hpp"
  60 #include "services/memTracker.hpp"
  61 #include "utilities/align.hpp"
  62 #include "utilities/growableArray.hpp"
  63 
  64 bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  65   assert(addr < _cm->finger(), "invariant");
  66   assert(addr >= _task->finger(), "invariant");
  67 
  68   // We move that task's local finger along.
  69   _task->move_finger_to(addr);
  70 
  71   _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));


1587 
1588 void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
1589   if (has_overflown()) {
1590     // Skip processing the discovered references if we have
1591     // overflown the global marking stack. Reference objects
1592     // only get discovered once, so it is OK not to
1593     // de-populate the discovered reference lists. We could have,
1594     // but the only benefit would be that, when marking restarts,
1595     // fewer reference objects are discovered.
1596     return;
1597   }
1598 
1599   ResourceMark rm;
1600   HandleMark   hm;
1601 
1602   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1603 
1604   // Is alive closure.
1605   G1CMIsAliveClosure g1_is_alive(g1h);
1606 
1607   // Instances of the 'Keep Alive' and 'Complete GC' closures used
1608   // in serial reference processing. Note these closures are also
1609   // used for serially processing (by the current thread) the
1610   // JNI references during parallel reference processing.
1611   //
1612   // These closures do not need to synchronize with the worker
1613   // threads involved in parallel reference processing as these
1614   // instances are executed serially by the current thread (i.e.
1615   // reference processing is not multi-threaded and is thus
1616   // performed by the current thread instead of a gang worker).
1617   //
1618   // The gang tasks involved in parallel reference processing create
1619   // their own instances of these closures, which do their own
1620   // synchronization among themselves.
1621   G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1622   G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1623 
1624   // Inner scope to exclude the cleaning of the string and symbol
1625   // tables from the displayed time.
1626   {
1627     GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm);
1628 
1629     ReferenceProcessor* rp = g1h->ref_processor_cm();
1630 
1631     // See the comment in G1CollectedHeap::ref_processing_init()
1632     // about how reference processing currently works in G1.
1633 
1634     // Set the soft reference policy
1635     rp->setup_policy(clear_all_soft_refs);
1636     assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1637 
1638     // We need at least one active thread. If reference processing
1639     // is not multi-threaded we use the current (VMThread) thread,
1640     // otherwise we use the work gang from the G1CollectedHeap and
1641     // we utilize all the worker threads we can.
1642     bool processing_is_mt = rp->processing_is_mt();
1643     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
1644     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
1645 
1646     // Parallel processing task executor.
1647     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
1648                                               g1h->workers(), active_workers);
1649     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1650 
1651     // Set the concurrency level. The phase was already set prior to
1652     // executing the remark task.
1653     set_concurrency(active_workers);
1654 
1655     // Set the degree of MT processing here.  If the discovery was done MT,
1656     // the number of threads involved during discovery could differ from
1657     // the number of active workers.  This is OK as long as the discovered


1669                                           &pt);
1670     _gc_tracer_cm->report_gc_reference_stats(stats);
1671     pt.print_all_references();
1672 
1673     // The do_oop work routines of the keep_alive and drain_marking_stack
1674     // oop closures will set the has_overflown flag if we overflow the
1675     // global marking stack.
1676 
1677     assert(has_overflown() || _global_mark_stack.is_empty(),
1678             "Mark stack should be empty (unless it has overflown)");
1679 
1680     assert(rp->num_q() == active_workers, "why not");
1681 
1682     rp->enqueue_discovered_references(executor, &pt);
1683 
1684     rp->verify_no_references_recorded();
1685 
1686     pt.print_enqueue_phase();
1687 
1688     assert(!rp->discovery_enabled(), "Post condition");
1689   }
1690 
1691   {
1692     GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
1693     WeakProcessor::unlink_or_oops_do(&g1_is_alive, &g1_keep_alive, &g1_drain_mark_stack);
1694   }
1695 
1696   if (has_overflown()) {
1697     // We cannot trust g1_is_alive if the marking stack overflowed
1698     return;
1699   }
1700 
1701   assert(_global_mark_stack.is_empty(), "Marking should have completed");
1702 
1703   // Unload Klasses, String, Symbols, Code Cache, etc.
1704   if (ClassUnloadingWithConcurrentMark) {
1705     GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1706     bool purged_classes = SystemDictionary::do_unloading(&g1_is_alive, _gc_timer_cm, false /* Defer cleaning */);
1707     g1h->complete_cleaning(&g1_is_alive, purged_classes);
1708   } else {
1709     GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
1710     // No need to clean string table and symbol table as they are treated as strong roots when
1711     // class unloading is disabled.
1712     g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled());
1713 
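
Both versions size reference processing the same way: when processing is multi-threaded they take the work gang's active worker count, otherwise a single (VMThread) thread, then clamp the result into [1, _max_worker_id]. A standalone illustration of that clamp; MAX2 and MIN2 are reimplemented here to keep the sketch self-contained (in HotSpot they come from utilities/globalDefinitions.hpp):

#include <cassert>

// Minimal MAX2/MIN2, mirroring HotSpot's templates.
template <typename T> static T MAX2(T a, T b) { return a > b ? a : b; }
template <typename T> static T MIN2(T a, T b) { return a < b ? a : b; }

// Mirrors: active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
static unsigned clamp_workers(bool processing_is_mt,
                              unsigned gang_active_workers,
                              unsigned max_worker_id) {
  unsigned active_workers = processing_is_mt ? gang_active_workers : 1U;
  return MAX2(MIN2(active_workers, max_worker_id), 1U);
}

int main() {
  assert(clamp_workers(false, 8, 4) == 1);  // serial: the current thread only
  assert(clamp_workers(true,  8, 4) == 4);  // capped at _max_worker_id
  assert(clamp_workers(true,  0, 4) == 1);  // never fewer than one worker
  return 0;
}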