src/share/vm/gc/g1/g1ConcurrentMark.cpp

rev 12504 : 8171238: Unify cleanup code used in G1 Remark and Full GC marking
Reviewed-by:

--- original version ---

1642     _enq_task.work(worker_id);
1643   }
1644 };
1645 
1646 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
1647   assert(_workers != NULL, "Need parallel worker threads.");
1648   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1649 
1650   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
1651 
1652   // Not strictly necessary but...
1653   //
1654   // We need to reset the concurrency level before each
1655   // proxy task execution, so that the termination protocol
1656   // and overflow handling in G1CMTask::do_marking_step() know
1657   // how many workers to wait for.
1658   _cm->set_concurrency(_active_workers);
1659   _workers->run_task(&enq_task_proxy);
1660 }
1661 
1662 void G1ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
1663   G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
1664 }
1665 
1666 void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
1667   if (has_overflown()) {
1668     // Skip processing the discovered references if we have
1669     // overflown the global marking stack. Reference objects
1670     // only get discovered once so it is OK to not
1671     // de-populate the discovered reference lists. We could have,
1672     // but the only benefit would be that, when marking restarts,
1673   // fewer reference objects are discovered.
1674     return;
1675   }
1676 
1677   ResourceMark rm;
1678   HandleMark   hm;
1679 
1680   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1681 
1682   // Is alive closure.
1683   G1CMIsAliveClosure g1_is_alive(g1h);
1684 
1685   // Inner scope to exclude the cleaning of the string and symbol


1758       set_has_overflown();
1759     }
1760 
1761     assert(rp->num_q() == active_workers, "why not");
1762 
1763     rp->enqueue_discovered_references(executor);
1764 
1765     rp->verify_no_references_recorded();
1766     assert(!rp->discovery_enabled(), "Post condition");
1767   }
1768 
1769   if (has_overflown()) {
1770     // We cannot trust g1_is_alive if the marking stack overflowed
1771     return;
1772   }
1773 
1774   assert(_global_mark_stack.is_empty(), "Marking should have completed");
1775 
1776   // Unload Klasses, Strings, Symbols, Code Cache, etc.
1777   if (ClassUnloadingWithConcurrentMark) {
1778     bool purged_classes;
1779 
1780     {
1781       GCTraceTime(Debug, gc, phases) trace("System Dictionary Unloading", _gc_timer_cm);
1782       purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
1783     }
1784 
1785     {
1786       GCTraceTime(Debug, gc, phases) trace("Parallel Unloading", _gc_timer_cm);
1787       weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
1788     }
1789   }
1790 
1791   if (G1StringDedup::is_enabled()) {
1792     GCTraceTime(Debug, gc, phases) trace("String Deduplication Unlink", _gc_timer_cm);
1793     G1StringDedup::unlink(&g1_is_alive);
1794   }
1795 }
1796 
1797 void G1ConcurrentMark::swapMarkBitMaps() {
1798   G1CMBitMapRO* temp = _prevMarkBitMap;
1799   _prevMarkBitMap    = (G1CMBitMapRO*)_nextMarkBitMap;
1800   _nextMarkBitMap    = (G1CMBitMap*)  temp;
1801 }
1802 
1803 // Closure for marking entries in SATB buffers.
1804 class G1CMSATBBufferClosure : public SATBBufferClosure {
1805 private:
1806   G1CMTask* _task;
1807   G1CollectedHeap* _g1h;
1808 
1809   // This is very similar to G1CMTask::deal_with_reference, but with
1810   // more relaxed requirements for the argument, so this must be more
1811   // circumspect about treating the argument as an object.
1812   void do_entry(void* entry) const {
1813     _task->increment_refs_reached();

--- patched version ---

1642     _enq_task.work(worker_id);
1643   }
1644 };
1645 
1646 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
1647   assert(_workers != NULL, "Need parallel worker threads.");
1648   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1649 
1650   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
1651 
1652   // Not strictly necessary but...
1653   //
1654   // We need to reset the concurrency level before each
1655   // proxy task execution, so that the termination protocol
1656   // and overflow handling in G1CMTask::do_marking_step() know
1657   // how many workers to wait for.
1658   _cm->set_concurrency(_active_workers);
1659   _workers->run_task(&enq_task_proxy);
1660 }
1661 
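The comment above refers to G1's parallel termination protocol: each worker in do_marking_step() eventually offers to terminate and then waits until the expected number of workers have offered as well. That expected count is whatever set_concurrency() last stored, so it has to be refreshed to _active_workers before every dispatch. A self-contained sketch of the idea, using std::thread in place of HotSpot's work gang (all names below are illustrative, not HotSpot code):

  #include <atomic>
  #include <thread>
  #include <vector>

  // Toy termination barrier: a worker "offers termination" and then
  // spins until every expected worker has offered as well. If the
  // expected count is stale and higher than the number of workers that
  // actually ran, the workers spin forever; if it is lower, some
  // workers leave the protocol before the task has really finished.
  class Terminator {
    std::atomic<int> _offered{0};
    int _expected = 0;
  public:
    void set_concurrency(int expected) { _expected = expected; _offered = 0; }
    void offer_termination() {
      _offered.fetch_add(1);
      while (_offered.load() < _expected) { /* spin */ }
    }
  };

  int main() {
    const int active_workers = 4;
    Terminator term;
    // Mirrors _cm->set_concurrency(_active_workers) before run_task():
    // the barrier must expect exactly the workers about to be started.
    term.set_concurrency(active_workers);

    std::vector<std::thread> gang;
    for (int i = 0; i < active_workers; i++) {
      gang.emplace_back([&term] {
        // ... per-worker enqueue/marking work would run here ...
        term.offer_termination();
      });
    }
    for (auto& t : gang) t.join();
    return 0;
  }

If set_concurrency() were skipped and the stale count were higher than active_workers, every worker would wait in offer_termination() forever, which is why the reset is done before each proxy task execution even when it looks redundant.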
1662 void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
1663   if (has_overflown()) {
1664     // Skip processing the discovered references if we have
1665     // overflown the global marking stack. Reference objects
1666     // only get discovered once so it is OK to not
1667     // de-populate the discovered reference lists. We could have,
1668     // but the only benefit would be that, when marking restarts,
1669   // fewer reference objects are discovered.
1670     return;
1671   }
1672 
1673   ResourceMark rm;
1674   HandleMark   hm;
1675 
1676   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1677 
1678   // Is alive closure.
1679   G1CMIsAliveClosure g1_is_alive(g1h);
1680 
1681   // Inner scope to exclude the cleaning of the string and symbol


1754       set_has_overflown();
1755     }
1756 
1757     assert(rp->num_q() == active_workers, "why not");
1758 
1759     rp->enqueue_discovered_references(executor);
1760 
1761     rp->verify_no_references_recorded();
1762     assert(!rp->discovery_enabled(), "Post condition");
1763   }
1764 
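For context on the two post-conditions asserted above: after Remark, enqueue_discovered_references() is expected to drain every per-worker discovered list (there are num_q() of them) onto the pending-reference list and leave discovery switched off until the next marking cycle turns it back on. A toy model of that contract, with a hypothetical stand-in class rather than HotSpot's ReferenceProcessor:

  #include <cassert>
  #include <vector>

  // Hypothetical stand-in: tracks a discovery flag and a single
  // discovered list (the real processor keeps num_q() of them).
  class RefProcModel {
    bool _discovery_enabled = false;
    std::vector<void*> _discovered;  // found during marking
    std::vector<void*> _pending;     // handed off to java.lang.ref handling
  public:
    void enable_discovery()        { _discovery_enabled = true; }
    bool discovery_enabled() const { return _discovery_enabled; }
    void discover(void* ref)       { if (_discovery_enabled) _discovered.push_back(ref); }

    // Drains every discovered entry onto the pending list and turns
    // discovery off, matching the post-conditions asserted after
    // rp->enqueue_discovered_references(executor) in the hunk.
    void enqueue_discovered_references() {
      _pending.insert(_pending.end(), _discovered.begin(), _discovered.end());
      _discovered.clear();
      _discovery_enabled = false;
    }
    void verify_no_references_recorded() const { assert(_discovered.empty()); }
  };

  int main() {
    RefProcModel rp;
    rp.enable_discovery();
    int dummy;
    rp.discover(&dummy);               // happens during marking
    rp.enqueue_discovered_references();
    rp.verify_no_references_recorded();
    assert(!rp.discovery_enabled());   // the "Post condition" in the hunk
    return 0;
  }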
1765   if (has_overflown()) {
1766     // We cannot trust g1_is_alive if the marking stack overflowed
1767     return;
1768   }
1769 
1770   assert(_global_mark_stack.is_empty(), "Marking should have completed");
1771 
1772   // Unload Klasses, Strings, Symbols, Code Cache, etc.
1773   if (ClassUnloadingWithConcurrentMark) {
1774     GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1775     bool purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
1776     g1h->full_cleaning(&g1_is_alive, purged_classes);
1777   } else {
1778     GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
1779     // No need to clean string table and symbol table as they are treated as strong roots when
1780     // class unloading is disabled.
1781     g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled());
1782
1783   }
1784 }
1785 
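This hunk is the core of 8171238: instead of the ad-hoc weakRefsWorkParallelPart() wrapper in the old version, Remark now calls two G1CollectedHeap entry points that the Full GC marking path can share. Their bodies are not part of this hunk, so the following is only a sketch of the factoring the two call sites suggest, with hypothetical signatures; in particular, it assumes full_cleaning() folds in string deduplication, since the separate G1StringDedup::unlink() call from the old version is gone:

  #include <cstdio>

  class BoolObjectClosure;  // G1's is-alive predicate; opaque here

  // Hypothetical common core: one parallel pass over the weak roots,
  // cleaning each table only when its flag is set.
  static void unified_cleaning(BoolObjectClosure* is_alive,
                               bool unlink_strings,
                               bool unlink_symbols,
                               bool unlink_code_and_klasses,
                               bool dedup_strings) {
    (void)is_alive;
    std::printf("strings=%d symbols=%d code/klasses=%d dedup=%d\n",
                unlink_strings, unlink_symbols,
                unlink_code_and_klasses, dedup_strings);
  }

  // Class unloading happened: everything weak gets cleaned
  // (models the g1h->full_cleaning(&g1_is_alive, purged_classes) call).
  void full_cleaning(BoolObjectClosure* is_alive, bool purged_classes) {
    (void)purged_classes;  // would steer klass cleaning in the real code
    unified_cleaning(is_alive, true, true, true, true);
  }

  // No class unloading: string/symbol tables were strong roots, so only
  // the explicitly requested passes run (models the call to
  // g1h->partial_cleaning(&g1_is_alive, false, false, dedup)).
  void partial_cleaning(BoolObjectClosure* is_alive,
                        bool unlink_strings, bool unlink_symbols,
                        bool dedup_strings) {
    unified_cleaning(is_alive, unlink_strings, unlink_symbols,
                     false, dedup_strings);
  }

  int main() {
    full_cleaning(nullptr, true);
    partial_cleaning(nullptr, false, false, true);
    return 0;
  }

Whether the real entry points are factored exactly this way cannot be told from this hunk; what the diff does show is that both branches now funnel through G1CollectedHeap, which is what lets the Full GC marking code reuse the same cleanup.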
1786 void G1ConcurrentMark::swapMarkBitMaps() {
1787   G1CMBitMapRO* temp = _prevMarkBitMap;
1788   _prevMarkBitMap    = (G1CMBitMapRO*)_nextMarkBitMap;
1789   _nextMarkBitMap    = (G1CMBitMap*)  temp;
1790 }
1791 
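The casts in swapMarkBitMaps() exist because the two fields deliberately expose different interfaces: the previous bitmap is held through the read-only G1CMBitMapRO base type so nothing can mark on it, while the next bitmap is the writable subclass. Both fields always point at writable bitmap objects, which is what makes the downcast safe. A minimal model of the pattern (names and layout are simplified stand-ins, not the HotSpot classes):

  #include <cstddef>

  // Read-only view: queries only, no mutation.
  class BitMapRO {
  public:
    virtual bool is_marked(size_t bit) const = 0;
    virtual ~BitMapRO() {}
  };

  // Writable subclass: adds marking on top of the read-only interface.
  class BitMap : public BitMapRO {
    bool _bits[1024] = {};
  public:
    bool is_marked(size_t bit) const override { return _bits[bit]; }
    void mark(size_t bit) { _bits[bit] = true; }
  };

  BitMapRO* _prev = nullptr;  // completed marking cycle; read-only view
  BitMap*   _next = nullptr;  // in-progress cycle; writable

  // Mirrors swapMarkBitMaps(): the downcast is safe because _prev only
  // ever holds objects that were installed as the writable _next before.
  void swap_bitmaps() {
    BitMapRO* temp = _prev;
    _prev = _next;
    _next = static_cast<BitMap*>(temp);
  }

  int main() {
    BitMap a, b;
    _prev = &a;
    _next = &b;
    _next->mark(42);
    swap_bitmaps();                      // b becomes "prev", a becomes "next"
    return _prev->is_marked(42) ? 0 : 1; // the finished bitmap keeps its marks
  }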
1792 // Closure for marking entries in SATB buffers.
1793 class G1CMSATBBufferClosure : public SATBBufferClosure {
1794 private:
1795   G1CMTask* _task;
1796   G1CollectedHeap* _g1h;
1797 
1798   // This is very similar to G1CMTask::deal_with_reference, but with
1799   // more relaxed requirements for the argument, so this must be more
1800   // circumspect about treating the argument as an object.
1801   void do_entry(void* entry) const {
1802     _task->increment_refs_reached();