src/share/vm/gc_implementation/g1/concurrentMark.cpp

   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/symbolTable.hpp"
  27 #include "gc_implementation/g1/concurrentMark.inline.hpp"
  28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  31 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  32 #include "gc_implementation/g1/g1Log.hpp"
  33 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  34 #include "gc_implementation/g1/g1RemSet.hpp"
  35 #include "gc_implementation/g1/heapRegion.inline.hpp"
  36 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  37 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  38 #include "gc_implementation/shared/vmGCOperations.hpp"
  39 #include "gc_implementation/shared/gcTimer.hpp"
  40 #include "gc_implementation/shared/gcTrace.hpp"
  41 #include "gc_implementation/shared/gcTraceTime.hpp"
  42 #include "memory/genOopClosures.inline.hpp"
  43 #include "memory/referencePolicy.hpp"
  44 #include "memory/resourceArea.hpp"
  45 #include "oops/oop.inline.hpp"
  46 #include "runtime/handles.inline.hpp"
  47 #include "runtime/java.hpp"
  48 #include "runtime/atomic.inline.hpp"
  49 #include "runtime/prefetch.inline.hpp"
  50 #include "services/memTracker.hpp"
  51 
  52 // Concurrent marking bit map wrapper
  53 
  54 CMBitMapRO::CMBitMapRO(int shifter) :
  55   _bm(),
  56   _shifter(shifter) {
  57   _bmStartWord = 0;
  58   _bmWordSize = 0;
  59 }
  60 
  61 HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
  62                                                HeapWord* limit) const {
  63   // First we must round addr *up* to a possible object boundary.
  64   addr = (HeapWord*)align_size_up((intptr_t)addr,
  65                                   HeapWordSize << _shifter);
  66   size_t addrOffset = heapWordToOffset(addr);
  67   if (limit == NULL) {
  68     limit = _bmStartWord + _bmWordSize;
  69   }
  70   size_t limitOffset = heapWordToOffset(limit);
  71   size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  72   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  73   assert(nextAddr >= addr, "get_next_one postcondition");
  74   assert(nextAddr == limit || isMarked(nextAddr),
  75          "get_next_one postcondition");
  76   return nextAddr;
  77 }
  78 
  79 HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
  80                                                  HeapWord* limit) const {
  81   size_t addrOffset = heapWordToOffset(addr);
  82   if (limit == NULL) {
  83     limit = _bmStartWord + _bmWordSize;
  84   }
  85   size_t limitOffset = heapWordToOffset(limit);
  86   size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  87   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  88   assert(nextAddr >= addr, "get_next_one postcondition");
  89   assert(nextAddr == limit || !isMarked(nextAddr),
  90          "get_next_one postcondition");
  91   return nextAddr;
  92 }
  93 
  94 int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  95   assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  96   return (int) (diff >> _shifter);
  97 }
  98 
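
getNextMarkedWordAddress() above is a bounded next-set-bit search: a heap address maps to a bit offset by shifting with _shifter (one bit covers 2^_shifter heap words), the start address is rounded up to a bit boundary, and the scan stops at the limit. The following standalone sketch illustrates the same idea with assumed toy names (ToyMarkBitMap, next_marked_word); it is a simplified illustration, not the HotSpot BitMap code.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

class ToyMarkBitMap {
  std::vector<uint64_t> _bits;  // one bit per (1 << _shifter) heap words
  int _shifter;                 // plays the role of CMBitMapRO::_shifter

  bool bit_at(size_t offset) const {
    return (_bits[offset >> 6] >> (offset & 63)) & 1;
  }

public:
  ToyMarkBitMap(size_t covered_words, int shifter)
    : _bits(((covered_words >> shifter) + 63) / 64, 0), _shifter(shifter) {}

  void mark_word(size_t word) {
    size_t off = word >> _shifter;
    _bits[off >> 6] |= (uint64_t(1) << (off & 63));
  }

  // Analogue of getNextMarkedWordAddress(): round the start word *up* to a
  // bit boundary, then scan forward for the next set bit below the limit.
  size_t next_marked_word(size_t start_word, size_t limit_word) const {
    size_t off   = (start_word + (size_t(1) << _shifter) - 1) >> _shifter;
    size_t limit = limit_word >> _shifter;
    while (off < limit && !bit_at(off)) {
      ++off;
    }
    size_t result = (off < limit) ? (off << _shifter) : limit_word;
    assert(result >= start_word);  // same postcondition as the asserts above
    return result;
  }
};

int main() {
  ToyMarkBitMap bm(1024 /* covered heap words */, 0 /* one bit per word */);
  bm.mark_word(100);
  std::cout << bm.next_marked_word(0, 1024)   << "\n";  // prints 100
  std::cout << bm.next_marked_word(101, 1024) << "\n";  // prints 1024 (limit)
  return 0;
}
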
  99 #ifndef PRODUCT
 100 bool CMBitMapRO::covers(ReservedSpace heap_rs) const {


1206   ConcurrentMark* _cm;
1207 
1208 public:
1209   CMRootRegionScanTask(ConcurrentMark* cm) :
1210     AbstractGangTask("Root Region Scan"), _cm(cm) { }
1211 
1212   void work(uint worker_id) {
1213     assert(Thread::current()->is_ConcurrentGC_thread(),
1214            "this should only be done by a conc GC thread");
1215 
1216     CMRootRegions* root_regions = _cm->root_regions();
1217     HeapRegion* hr = root_regions->claim_next();
1218     while (hr != NULL) {
1219       _cm->scanRootRegion(hr, worker_id);
1220       hr = root_regions->claim_next();
1221     }
1222   }
1223 };
1224 
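
CMRootRegionScanTask::work() above distributes root regions with a simple claim loop: every worker keeps calling claim_next() until it returns NULL, so each region is scanned exactly once no matter how many workers run. A minimal sketch of that pattern, using assumed toy types (ToyRootRegions, ToyRegion) rather than the HotSpot classes:

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <thread>
#include <vector>

struct ToyRegion { int id; };

class ToyRootRegions {
  std::vector<ToyRegion> _regions;
  std::atomic<size_t> _next{0};  // index of the next unclaimed region
public:
  explicit ToyRootRegions(size_t n) {
    for (size_t i = 0; i < n; ++i) _regions.push_back(ToyRegion{(int)i});
  }
  // Returns the next unclaimed region, or nullptr once all have been claimed.
  ToyRegion* claim_next() {
    size_t idx = _next.fetch_add(1, std::memory_order_relaxed);
    return (idx < _regions.size()) ? &_regions[idx] : nullptr;
  }
};

static void worker(ToyRootRegions* roots, unsigned worker_id) {
  // Same shape as CMRootRegionScanTask::work(): claim, scan, repeat.
  for (ToyRegion* r = roots->claim_next(); r != nullptr; r = roots->claim_next()) {
    std::printf("worker %u scans region %d\n", worker_id, r->id);
  }
}

int main() {
  ToyRootRegions roots(8);
  std::vector<std::thread> gang;
  for (unsigned i = 0; i < 3; ++i) gang.emplace_back(worker, &roots, i);
  for (auto& t : gang) t.join();
  return 0;
}
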
1225 void ConcurrentMark::scanRootRegions() {
1226   // scan_in_progress() will have been set to true only if there was
1227   // at least one root region to scan. So, if it's false, we
1228   // should not attempt to do any further work.
1229   if (root_regions()->scan_in_progress()) {
1230     _parallel_marking_threads = calc_parallel_marking_threads();
1231     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1232            "Maximum number of marking threads exceeded");
1233     uint active_workers = MAX2(1U, parallel_marking_threads());
1234 
1235     CMRootRegionScanTask task(this);
1236     if (use_parallel_marking_threads()) {
1237       _parallel_workers->set_active_workers((int) active_workers);
1238       _parallel_workers->run_task(&task);
1239     } else {
1240       task.work(0);
1241     }
1242 
1243     // It's possible that has_aborted() is true here without actually
1244     // aborting the survivor scan earlier. This is OK as it's
1245     // mainly used for sanity checking.


1254   // However that wouldn't be right, because it's possible that
1255   // a safepoint is indeed in progress as a younger generation
1256   // stop-the-world GC happens even as we mark in this generation.
1257 
1258   _restart_for_overflow = false;
1259   force_overflow_conc()->init();
1260 
1261   // _g1h has _n_par_threads
1262   _parallel_marking_threads = calc_parallel_marking_threads();
1263   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1264     "Maximum number of marking threads exceeded");
1265 
1266   uint active_workers = MAX2(1U, parallel_marking_threads());
1267 
1268   // Parallel task terminator is set in "set_concurrency_and_phase()"
1269   set_concurrency_and_phase(active_workers, true /* concurrent */);
1270 
1271   CMConcurrentMarkingTask markingTask(this, cmThread());
1272   if (use_parallel_marking_threads()) {
1273     _parallel_workers->set_active_workers((int)active_workers);
1274     // Don't set _n_par_threads because it affects MT in process_strong_roots()
1275     // and the decisions on that MT processing are made elsewhere.
1276     assert(_parallel_workers->active_workers() > 0, "Should have been set");
1277     _parallel_workers->run_task(&markingTask);
1278   } else {
1279     markingTask.work(0);
1280   }
1281   print_stats();
1282 }
1283 
1284 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1285   // world is stopped at this checkpoint
1286   assert(SafepointSynchronize::is_at_safepoint(),
1287          "world should be stopped");
1288 
1289   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1290 
1291   // If a full collection has happened, we shouldn't do this.
1292   if (has_aborted()) {
1293     g1h->set_marking_complete(); // So bitmap clearing isn't confused
1294     return;


2125 
2126   // this will also free any regions totally full of garbage objects,
2127   // and sort the regions.
2128   g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2129 
2130   // Statistics.
2131   double end = os::elapsedTime();
2132   _cleanup_times.add((end - start) * 1000.0);
2133 
2134   if (G1Log::fine()) {
2135     g1h->print_size_transition(gclog_or_tty,
2136                                start_used_bytes,
2137                                g1h->used(),
2138                                g1h->capacity());
2139   }
2140 
2141   // Clean up will have freed any regions completely full of garbage.
2142   // Update the soft reference policy with the new heap occupancy.
2143   Universe::update_heap_info_at_gc();
2144 
2145   // We need to make this a "collection" so any collection pause that
2146   // races with it goes around and waits for completeCleanup to finish.
2147   g1h->increment_total_collections();
2148 
2149   // We reclaimed old regions so we should calculate the sizes to make
2150   // sure we update the old gen/space data.
2151   g1h->g1mm()->update_sizes();
2152 
2153   if (VerifyDuringGC) {
2154     HandleMark hm;  // handle scope
2155     Universe::heap()->prepare_for_verify();
2156     Universe::verify(VerifyOption_G1UsePrevMarking,
2157                      " VerifyDuringGC:(after)");
2158   }

2159   g1h->check_bitmaps("Cleanup End");
2160 
2161   g1h->verify_region_sets_optional();

2162   g1h->trace_heap_after_concurrent_cycle();
2163 }
2164 
2165 void ConcurrentMark::completeCleanup() {
2166   if (has_aborted()) return;
2167 
2168   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2169 
2170   _cleanup_list.verify_optional();
2171   FreeRegionList tmp_free_list("Tmp Free List");
2172 
2173   if (G1ConcRegionFreeingVerbose) {
2174     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2175                            "cleanup list has %u entries",
2176                            _cleanup_list.length());
2177   }
2178 
2179   // No one else should be accessing the _cleanup_list at this point,
2180   // so it's not necessary to take any locks
2181   while (!_cleanup_list.is_empty()) {


2428 };
2429 
2430 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2431   assert(_workers != NULL, "Need parallel worker threads.");
2432   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2433 
2434   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2435 
2436   // Not strictly necessary but...
2437   //
2438   // We need to reset the concurrency level before each
2439   // proxy task execution, so that the termination protocol
2440   // and overflow handling in CMTask::do_marking_step() know
2441   // how many workers to wait for.
2442   _cm->set_concurrency(_active_workers);
2443   _g1h->set_par_threads(_active_workers);
2444   _workers->run_task(&enq_task_proxy);
2445   _g1h->set_par_threads(0);
2446 }
2447 
2448 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2449   if (has_overflown()) {
2450     // Skip processing the discovered references if we have
2451     // overflown the global marking stack. Reference objects
2452     // only get discovered once so it is OK to not
2453     // de-populate the discovered reference lists. We could have,
2454     // but the only benefit would be that, when marking restarts,
2455     // fewer reference objects are discovered.
2456     return;
2457   }
2458 
2459   ResourceMark rm;
2460   HandleMark   hm;
2461 
2462   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2463 
2464   // Is alive closure.
2465   G1CMIsAliveClosure g1_is_alive(g1h);
2466 
2467   // Inner scope to exclude the cleaning of the string and symbol


2540 
2541     if (_markStack.overflow()) {
2542       // This should have been done already when we tried to push an
2543       // entry on to the global mark stack. But let's do it again.
2544       set_has_overflown();
2545     }
2546 
2547     assert(rp->num_q() == active_workers, "why not");
2548 
2549     rp->enqueue_discovered_references(executor);
2550 
2551     rp->verify_no_references_recorded();
2552     assert(!rp->discovery_enabled(), "Post condition");
2553   }
2554 
2555   if (has_overflown()) {
2556     // We cannot trust g1_is_alive if the marking stack overflowed
2557     return;
2558   }
2559 
2560   g1h->unlink_string_and_symbol_table(&g1_is_alive,
2561                                       /* process_strings */ false, // currently strings are always roots
2562                                       /* process_symbols */ true);
2563 }
2564 
2565 void ConcurrentMark::swapMarkBitMaps() {
2566   CMBitMapRO* temp = _prevMarkBitMap;
2567   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
2568   _nextMarkBitMap  = (CMBitMap*)  temp;
2569 }
2570 
2571 class CMRemarkTask: public AbstractGangTask {
2572 private:
2573   ConcurrentMark* _cm;
2574   bool            _is_serial;
2575 public:
2576   void work(uint worker_id) {
2577     // Since all available tasks are actually started, we should
2578     // only proceed if we're supposed to be active.
2579     if (worker_id < _cm->active_tasks()) {
2580       CMTask* task = _cm->task(worker_id);
2581       task->record_start_time();
2582       do {
2583         task->do_marking_step(1000000000.0 /* something very large */,
2584                               true         /* do_termination       */,
2585                               _is_serial);
2586       } while (task->has_aborted() && !_cm->has_overflown());
2587       // If we overflow, then we do not want to restart. We instead
2588       // want to abort remark and do concurrent marking again.
2589       task->record_end_time();
2590     }
2591   }
2592 
2593   CMRemarkTask(ConcurrentMark* cm, int active_workers, bool is_serial) :
2594     AbstractGangTask("Par Remark"), _cm(cm), _is_serial(is_serial) {
2595     _cm->terminator()->reset_for_reuse(active_workers);
2596   }
2597 };
2598 
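
The do/while loop in CMRemarkTask::work() above retries do_marking_step() after a transient abort but gives up once the global mark stack has overflown, so that remark can be abandoned and concurrent marking restarted. A toy model of that control flow, with all names here being illustrative stand-ins rather than HotSpot types:

#include <cstdio>

struct ToyMarkState {
  bool overflown = false;     // stands in for ConcurrentMark::has_overflown()
};

struct ToyTask {
  ToyMarkState* cm;
  int transient_aborts_left;  // pretend the first N steps abort transiently
  bool aborted = false;

  void do_marking_step() {
    if (transient_aborts_left > 0) {
      --transient_aborts_left;
      aborted = true;         // transient abort: caller should simply retry
    } else {
      aborted = false;        // finished cleanly
    }
  }
};

static void remark_work(ToyTask* task) {
  do {
    task->do_marking_step();
  } while (task->aborted && !task->cm->overflown);
  std::printf("remark %s\n",
              task->cm->overflown ? "abandoned (overflow)" : "completed");
}

int main() {
  ToyMarkState cm;
  ToyTask task{&cm, /*transient_aborts_left=*/2};
  remark_work(&task);         // retries twice, then completes
  return 0;
}
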
2599 void ConcurrentMark::checkpointRootsFinalWork() {
2600   ResourceMark rm;
2601   HandleMark   hm;
2602   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2603 
2604   g1h->ensure_parsability(false);
2605 
2606   if (G1CollectedHeap::use_parallel_gc_threads()) {
2607     G1CollectedHeap::StrongRootsScope srs(g1h);
2608     // this is remark, so we'll use up all active threads
2609     uint active_workers = g1h->workers()->active_workers();
2610     if (active_workers == 0) {
2611       assert(active_workers > 0, "Should have been set earlier");
2612       active_workers = (uint) ParallelGCThreads;
2613       g1h->workers()->set_active_workers(active_workers);
2614     }
2615     set_concurrency_and_phase(active_workers, false /* concurrent */);
2616     // Leave _parallel_marking_threads at its
2617     // value originally calculated in the ConcurrentMark
2618     // constructor and pass values of the active workers
2619     // through the gang in the task.
2620 
2621     CMRemarkTask remarkTask(this, active_workers, false /* is_serial */);
2622     // We will start all available threads, even if we decide that the
2623     // active_workers will be fewer. The extra ones will just bail out


3413     assert(_nextMarkBitMap->isMarked(addr), "invariant");
3414     assert( addr < _cm->finger(), "invariant");
3415 
3416     statsOnly( _task->increase_objs_found_on_bitmap() );
3417     assert(addr >= _task->finger(), "invariant");
3418 
3419     // We move that task's local finger along.
3420     _task->move_finger_to(addr);
3421 
3422     _task->scan_object(oop(addr));
3423     // we only partially drain the local queue and global stack
3424     _task->drain_local_queue(true);
3425     _task->drain_global_stack(true);
3426 
3427     // if the has_aborted flag has been raised, we need to bail out of
3428     // the iteration
3429     return !_task->has_aborted();
3430   }
3431 };
3432 
3433 // Closure for iterating over objects, currently only used for
3434 // processing SATB buffers.
3435 class CMObjectClosure : public ObjectClosure {
3436 private:
3437   CMTask* _task;
3438 
3439 public:
3440   void do_object(oop obj) {
3441     _task->deal_with_reference(obj);
3442   }
3443 
3444   CMObjectClosure(CMTask* task) : _task(task) { }
3445 };
3446 
3447 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3448                                ConcurrentMark* cm,
3449                                CMTask* task)
3450   : _g1h(g1h), _cm(cm), _task(task) {
3451   assert(_ref_processor == NULL, "should be initialized to NULL");
3452 
3453   if (G1UseConcMarkReferenceProcessing) {
3454     _ref_processor = g1h->ref_processor_cm();
3455     assert(_ref_processor != NULL, "should not be NULL");
3456   }
3457 }
3458 
3459 void CMTask::setup_for_region(HeapRegion* hr) {
3460   assert(hr != NULL,
3461         "claim_region() should have filtered out NULL regions");
3462   assert(!hr->continuesHumongous(),
3463         "claim_region() should have filtered out continues humongous regions");
3464 
3465   if (_cm->verbose_low()) {
3466     gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,


3888 
3889   // This keeps claiming and applying the closure to completed buffers
3890   // until we run out of buffers or we need to abort.
3891   if (G1CollectedHeap::use_parallel_gc_threads()) {
3892     while (!has_aborted() &&
3893            satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) {
3894       if (_cm->verbose_medium()) {
3895         gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3896       }
3897       statsOnly( ++_satb_buffers_processed );
3898       regular_clock_call();
3899     }
3900   } else {
3901     while (!has_aborted() &&
3902            satb_mq_set.apply_closure_to_completed_buffer()) {
3903       if (_cm->verbose_medium()) {
3904         gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3905       }
3906       statsOnly( ++_satb_buffers_processed );
3907       regular_clock_call();
3908     }
3909   }
3910 
3911   if (!concurrent() && !has_aborted()) {
3912     // We should only do this during remark.
3913     if (G1CollectedHeap::use_parallel_gc_threads()) {
3914       satb_mq_set.par_iterate_closure_all_threads(_worker_id);
3915     } else {
3916       satb_mq_set.iterate_closure_all_threads();
3917     }
3918   }
3919 
3920   _draining_satb_buffers = false;
3921 
3922   assert(has_aborted() ||
3923          concurrent() ||
3924          satb_mq_set.completed_buffers_num() == 0, "invariant");
3925 
3926   if (G1CollectedHeap::use_parallel_gc_threads()) {
3927     satb_mq_set.set_par_closure(_worker_id, NULL);
3928   } else {
3929     satb_mq_set.set_closure(NULL);
3930   }
3931 
3932   // again, this was a potentially expensive operation, decrease the
3933   // limits to get the regular clock call early
3934   decrease_limits();
3935 }
3936 
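
The SATB draining loop above repeatedly claims a completed buffer and applies the marking closure to it until no buffers remain or the task has to abort. A self-contained sketch of that claim-and-apply loop, using an assumed ToySATBQueueSet in place of the real SATB mark queue set:

#include <cstddef>
#include <cstdio>
#include <deque>
#include <mutex>
#include <vector>

using ToyBuffer = std::vector<const void*>;  // a batch of SATB-recorded oops

class ToySATBQueueSet {
  std::deque<ToyBuffer> _completed;  // buffers handed off by mutator threads
  std::mutex _lock;
public:
  void enqueue_completed(ToyBuffer buf) {
    std::lock_guard<std::mutex> g(_lock);
    _completed.push_back(std::move(buf));
  }
  // Analogue of (par_)apply_closure_to_completed_buffer(): claim one buffer,
  // run the closure over it, and report whether a buffer was processed.
  template <typename Closure>
  bool apply_closure_to_completed_buffer(Closure& cl) {
    ToyBuffer buf;
    {
      std::lock_guard<std::mutex> g(_lock);
      if (_completed.empty()) return false;
      buf = std::move(_completed.front());
      _completed.pop_front();
    }
    for (const void* p : buf) cl(p);
    return true;
  }
};

int main() {
  ToySATBQueueSet set;
  int x = 0, y = 0;
  set.enqueue_completed({&x, &y});
  set.enqueue_completed({&x});

  size_t processed = 0;
  auto closure = [&](const void*) { ++processed; };
  bool aborted = false;                      // stands in for has_aborted()
  while (!aborted && set.apply_closure_to_completed_buffer(closure)) {
    // like the loop in drainSatbBuffers(); a real task would also call
    // regular_clock_call() here to check whether it should yield.
  }
  std::printf("processed %zu references\n", processed);  // prints 3
  return 0;
}
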




   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc_implementation/g1/concurrentMark.inline.hpp"
  30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  31 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  32 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  33 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  34 #include "gc_implementation/g1/g1Log.hpp"
  35 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  36 #include "gc_implementation/g1/g1RemSet.hpp"
  37 #include "gc_implementation/g1/heapRegion.inline.hpp"
  38 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  39 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  40 #include "gc_implementation/shared/vmGCOperations.hpp"
  41 #include "gc_implementation/shared/gcTimer.hpp"
  42 #include "gc_implementation/shared/gcTrace.hpp"
  43 #include "gc_implementation/shared/gcTraceTime.hpp"
  44 #include "memory/genOopClosures.inline.hpp"
  45 #include "memory/referencePolicy.hpp"
  46 #include "memory/resourceArea.hpp"
  47 #include "oops/oop.inline.hpp"
  48 #include "runtime/handles.inline.hpp"
  49 #include "runtime/java.hpp"
  50 #include "runtime/atomic.inline.hpp"
  51 #include "runtime/prefetch.inline.hpp"
  52 #include "services/memTracker.hpp"
  53 
  54 // Concurrent marking bit map wrapper
  55 
  56 CMBitMapRO::CMBitMapRO(int shifter) :
  57   _bm(),
  58   _shifter(shifter) {
  59   _bmStartWord = 0;
  60   _bmWordSize = 0;
  61 }
  62 
  63 HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
  64                                                const HeapWord* limit) const {
  65   // First we must round addr *up* to a possible object boundary.
  66   addr = (HeapWord*)align_size_up((intptr_t)addr,
  67                                   HeapWordSize << _shifter);
  68   size_t addrOffset = heapWordToOffset(addr);
  69   if (limit == NULL) {
  70     limit = _bmStartWord + _bmWordSize;
  71   }
  72   size_t limitOffset = heapWordToOffset(limit);
  73   size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  74   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  75   assert(nextAddr >= addr, "get_next_one postcondition");
  76   assert(nextAddr == limit || isMarked(nextAddr),
  77          "get_next_one postcondition");
  78   return nextAddr;
  79 }
  80 
  81 HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
  82                                                  const HeapWord* limit) const {
  83   size_t addrOffset = heapWordToOffset(addr);
  84   if (limit == NULL) {
  85     limit = _bmStartWord + _bmWordSize;
  86   }
  87   size_t limitOffset = heapWordToOffset(limit);
  88   size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  89   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  90   assert(nextAddr >= addr, "get_next_one postcondition");
  91   assert(nextAddr == limit || !isMarked(nextAddr),
  92          "get_next_one postcondition");
  93   return nextAddr;
  94 }
  95 
  96 int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  97   assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  98   return (int) (diff >> _shifter);
  99 }
 100 
 101 #ifndef PRODUCT
 102 bool CMBitMapRO::covers(ReservedSpace heap_rs) const {


1208   ConcurrentMark* _cm;
1209 
1210 public:
1211   CMRootRegionScanTask(ConcurrentMark* cm) :
1212     AbstractGangTask("Root Region Scan"), _cm(cm) { }
1213 
1214   void work(uint worker_id) {
1215     assert(Thread::current()->is_ConcurrentGC_thread(),
1216            "this should only be done by a conc GC thread");
1217 
1218     CMRootRegions* root_regions = _cm->root_regions();
1219     HeapRegion* hr = root_regions->claim_next();
1220     while (hr != NULL) {
1221       _cm->scanRootRegion(hr, worker_id);
1222       hr = root_regions->claim_next();
1223     }
1224   }
1225 };
1226 
1227 void ConcurrentMark::scanRootRegions() {
1228   // Start of concurrent marking.
1229   ClassLoaderDataGraph::clear_claimed_marks();
1230 
1231   // scan_in_progress() will have been set to true only if there was
1232   // at least one root region to scan. So, if it's false, we
1233   // should not attempt to do any further work.
1234   if (root_regions()->scan_in_progress()) {
1235     _parallel_marking_threads = calc_parallel_marking_threads();
1236     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1237            "Maximum number of marking threads exceeded");
1238     uint active_workers = MAX2(1U, parallel_marking_threads());
1239 
1240     CMRootRegionScanTask task(this);
1241     if (use_parallel_marking_threads()) {
1242       _parallel_workers->set_active_workers((int) active_workers);
1243       _parallel_workers->run_task(&task);
1244     } else {
1245       task.work(0);
1246     }
1247 
1248     // It's possible that has_aborted() is true here without actually
1249     // aborting the survivor scan earlier. This is OK as it's
1250     // mainly used for sanity checking.


1259   // However that wouldn't be right, because it's possible that
1260   // a safepoint is indeed in progress as a younger generation
1261   // stop-the-world GC happens even as we mark in this generation.
1262 
1263   _restart_for_overflow = false;
1264   force_overflow_conc()->init();
1265 
1266   // _g1h has _n_par_threads
1267   _parallel_marking_threads = calc_parallel_marking_threads();
1268   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1269     "Maximum number of marking threads exceeded");
1270 
1271   uint active_workers = MAX2(1U, parallel_marking_threads());
1272 
1273   // Parallel task terminator is set in "set_concurrency_and_phase()"
1274   set_concurrency_and_phase(active_workers, true /* concurrent */);
1275 
1276   CMConcurrentMarkingTask markingTask(this, cmThread());
1277   if (use_parallel_marking_threads()) {
1278     _parallel_workers->set_active_workers((int)active_workers);
1279     // Don't set _n_par_threads because it affects MT in process_roots()
1280     // and the decisions on that MT processing are made elsewhere.
1281     assert(_parallel_workers->active_workers() > 0, "Should have been set");
1282     _parallel_workers->run_task(&markingTask);
1283   } else {
1284     markingTask.work(0);
1285   }
1286   print_stats();
1287 }
1288 
1289 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1290   // world is stopped at this checkpoint
1291   assert(SafepointSynchronize::is_at_safepoint(),
1292          "world should be stopped");
1293 
1294   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1295 
1296   // If a full collection has happened, we shouldn't do this.
1297   if (has_aborted()) {
1298     g1h->set_marking_complete(); // So bitmap clearing isn't confused
1299     return;


2130 
2131   // this will also free any regions totally full of garbage objects,
2132   // and sort the regions.
2133   g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2134 
2135   // Statistics.
2136   double end = os::elapsedTime();
2137   _cleanup_times.add((end - start) * 1000.0);
2138 
2139   if (G1Log::fine()) {
2140     g1h->print_size_transition(gclog_or_tty,
2141                                start_used_bytes,
2142                                g1h->used(),
2143                                g1h->capacity());
2144   }
2145 
2146   // Clean up will have freed any regions completely full of garbage.
2147   // Update the soft reference policy with the new heap occupancy.
2148   Universe::update_heap_info_at_gc();
2149 
2150   if (VerifyDuringGC) {
2151     HandleMark hm;  // handle scope
2152     Universe::heap()->prepare_for_verify();
2153     Universe::verify(VerifyOption_G1UsePrevMarking,
2154                      " VerifyDuringGC:(after)");
2155   }
2156 
2157   g1h->check_bitmaps("Cleanup End");
2158 
2159   g1h->verify_region_sets_optional();
2160 
2161   // We need to make this a "collection" so any collection pause that
2162   // races with it goes around and waits for completeCleanup to finish.
2163   g1h->increment_total_collections();
2164 
2165   // Clean out dead classes and update Metaspace sizes.
2166   ClassLoaderDataGraph::purge();
2167   MetaspaceGC::compute_new_size();
2168 
2169   // We reclaimed old regions so we should calculate the sizes to make
2170   // sure we update the old gen/space data.
2171   g1h->g1mm()->update_sizes();
2172 
2173   g1h->trace_heap_after_concurrent_cycle();
2174 }
2175 
2176 void ConcurrentMark::completeCleanup() {
2177   if (has_aborted()) return;
2178 
2179   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2180 
2181   _cleanup_list.verify_optional();
2182   FreeRegionList tmp_free_list("Tmp Free List");
2183 
2184   if (G1ConcRegionFreeingVerbose) {
2185     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2186                            "cleanup list has %u entries",
2187                            _cleanup_list.length());
2188   }
2189 
2190   // No one else should be accessing the _cleanup_list at this point,
2191   // so it's not necessary to take any locks
2192   while (!_cleanup_list.is_empty()) {


2439 };
2440 
2441 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2442   assert(_workers != NULL, "Need parallel worker threads.");
2443   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2444 
2445   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2446 
2447   // Not strictly necessary but...
2448   //
2449   // We need to reset the concurrency level before each
2450   // proxy task execution, so that the termination protocol
2451   // and overflow handling in CMTask::do_marking_step() know
2452   // how many workers to wait for.
2453   _cm->set_concurrency(_active_workers);
2454   _g1h->set_par_threads(_active_workers);
2455   _workers->run_task(&enq_task_proxy);
2456   _g1h->set_par_threads(0);
2457 }
2458 
2459 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
2460   G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
2461 }
2462 
2463 // Helper class to get rid of some boilerplate code.
2464 class G1RemarkGCTraceTime : public GCTraceTime {
2465   static bool doit_and_prepend(bool doit) {
2466     if (doit) {
2467       gclog_or_tty->put(' ');
2468     }
2469     return doit;
2470   }
2471 
2472  public:
2473   G1RemarkGCTraceTime(const char* title, bool doit)
2474     : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
2475         G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
2476   }
2477 };
2478 
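
G1RemarkGCTraceTime is a thin RAII wrapper: construction optionally emits a leading space and starts the phase timer (the doit_and_prepend() trick runs that side effect before the GCTraceTime base constructor), and leaving the scope reports the elapsed time. A standalone sketch of the same scoped-timer pattern, with an assumed ToyTraceTime class instead of the real GCTraceTime API:

#include <chrono>
#include <cstdio>

class ToyTraceTime {
  const char* _title;
  bool _enabled;
  std::chrono::steady_clock::time_point _start;

  // Mirrors doit_and_prepend(): run a side effect while computing the flag
  // that is passed on, so the prefix is printed before timing starts.
  static bool print_prefix_if(bool enabled) {
    if (enabled) std::printf(" ");
    return enabled;
  }
public:
  ToyTraceTime(const char* title, bool enabled)
    : _title(title), _enabled(print_prefix_if(enabled)),
      _start(std::chrono::steady_clock::now()) {}

  ~ToyTraceTime() {
    if (!_enabled) return;
    double ms = std::chrono::duration_cast<
        std::chrono::duration<double, std::milli>>(
        std::chrono::steady_clock::now() - _start).count();
    std::printf("[%s, %.3f ms]\n", _title, ms);
  }
};

int main() {
  {
    ToyTraceTime t("Unloading", /*enabled=*/true);
    // ... work being timed, e.g. the class unloading phases ...
  }  // destructor prints "[Unloading, N ms]"
  return 0;
}
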
2479 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2480   if (has_overflown()) {
2481     // Skip processing the discovered references if we have
2482     // overflown the global marking stack. Reference objects
2483     // only get discovered once so it is OK to not
2484     // de-populate the discovered reference lists. We could have,
2485     // but the only benefit would be that, when marking restarts,
2486     // fewer reference objects are discovered.
2487     return;
2488   }
2489 
2490   ResourceMark rm;
2491   HandleMark   hm;
2492 
2493   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2494 
2495   // Is alive closure.
2496   G1CMIsAliveClosure g1_is_alive(g1h);
2497 
2498   // Inner scope to exclude the cleaning of the string and symbol


2571 
2572     if (_markStack.overflow()) {
2573       // This should have been done already when we tried to push an
2574       // entry on to the global mark stack. But let's do it again.
2575       set_has_overflown();
2576     }
2577 
2578     assert(rp->num_q() == active_workers, "why not");
2579 
2580     rp->enqueue_discovered_references(executor);
2581 
2582     rp->verify_no_references_recorded();
2583     assert(!rp->discovery_enabled(), "Post condition");
2584   }
2585 
2586   if (has_overflown()) {
2587     // We cannot trust g1_is_alive if the marking stack overflowed
2588     return;
2589   }
2590 
2591   assert(_markStack.isEmpty(), "Marking should have completed");
2592 
2593   // Unload Klasses, String, Symbols, Code Cache, etc.
2594 
2595   G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
2596 
2597   bool purged_classes;
2598 
2599   {
2600     G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
2601     purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
2602   }
2603 
2604   {
2605     G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
2606     weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
2607   }
2608 
2609   if (G1StringDedup::is_enabled()) {
2610     G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
2611     G1StringDedup::unlink(&g1_is_alive);
2612   }
2613 }
2614 
2615 void ConcurrentMark::swapMarkBitMaps() {
2616   CMBitMapRO* temp = _prevMarkBitMap;
2617   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
2618   _nextMarkBitMap  = (CMBitMap*)  temp;
2619 }
2620 
2621 class CMObjectClosure;
2622 
2623 // Closure for iterating over objects, currently only used for
2624 // processing SATB buffers.
2625 class CMObjectClosure : public ObjectClosure {
2626 private:
2627   CMTask* _task;
2628 
2629 public:
2630   void do_object(oop obj) {
2631     _task->deal_with_reference(obj);
2632   }
2633 
2634   CMObjectClosure(CMTask* task) : _task(task) { }
2635 };
2636 
2637 class G1RemarkThreadsClosure : public ThreadClosure {
2638   CMObjectClosure _cm_obj;
2639   G1CMOopClosure _cm_cl;
2640   MarkingCodeBlobClosure _code_cl;
2641   int _thread_parity;
2642   bool _is_par;
2643 
2644  public:
2645   G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task, bool is_par) :
2646     _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
2647     _thread_parity(SharedHeap::heap()->strong_roots_parity()), _is_par(is_par) {}
2648 
2649   void do_thread(Thread* thread) {
2650     if (thread->is_Java_thread()) {
2651       if (thread->claim_oops_do(_is_par, _thread_parity)) {
2652         JavaThread* jt = (JavaThread*)thread;
2653 
2654         // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
2655         // however, oops reachable from nmethods have very complex liveness lifecycles:
2656         // * Alive if on the stack of an executing method
2657         // * Weakly reachable otherwise
2658         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
2659         // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
2660         jt->nmethods_do(&_code_cl);
2661 
2662         jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
2663       }
2664     } else if (thread->is_VM_thread()) {
2665       if (thread->claim_oops_do(_is_par, _thread_parity)) {
2666         JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
2667       }
2668     }
2669   }
2670 };
2671 
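
G1RemarkThreadsClosure relies on Thread::claim_oops_do(): every remark worker walks all threads, but a compare-and-swap on each thread's claimed parity ensures that only one worker processes a given thread's nmethods and SATB buffer per phase. A minimal sketch of that claiming idiom, using assumed toy types rather than the HotSpot thread classes:

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

struct ToyThread {
  int id;
  std::atomic<int> claimed_parity{0};

  // Returns true for exactly one claimer per value of current_parity.
  bool claim_oops_do(int current_parity) {
    int seen = claimed_parity.load(std::memory_order_relaxed);
    return seen != current_parity &&
           claimed_parity.compare_exchange_strong(seen, current_parity);
  }
};

int main() {
  const int parity = 1;                 // stands in for strong_roots_parity()
  std::vector<ToyThread> threads(4);
  for (int i = 0; i < 4; ++i) threads[i].id = i;

  std::atomic<int> processed{0};
  auto worker = [&](int worker_id) {
    for (ToyThread& t : threads) {      // every worker walks every thread...
      if (t.claim_oops_do(parity)) {    // ...but only one claim succeeds
        std::printf("worker %d processes thread %d\n", worker_id, t.id);
        processed.fetch_add(1);
      }
    }
  };

  std::vector<std::thread> gang;
  for (int w = 0; w < 3; ++w) gang.emplace_back(worker, w);
  for (auto& g : gang) g.join();
  std::printf("threads processed: %d (expected 4)\n", processed.load());
  return 0;
}
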
2672 class CMRemarkTask: public AbstractGangTask {
2673 private:
2674   ConcurrentMark* _cm;
2675   bool            _is_serial;
2676 public:
2677   void work(uint worker_id) {
2678     // Since all available tasks are actually started, we should
2679     // only proceed if we're supposed to be active.
2680     if (worker_id < _cm->active_tasks()) {
2681       CMTask* task = _cm->task(worker_id);
2682       task->record_start_time();
2683       {
2684         ResourceMark rm;
2685         HandleMark hm;
2686 
2687         G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task, !_is_serial);
2688         Threads::threads_do(&threads_f);
2689       }
2690 
2691       do {
2692         task->do_marking_step(1000000000.0 /* something very large */,
2693                               true         /* do_termination       */,
2694                               _is_serial);
2695       } while (task->has_aborted() && !_cm->has_overflown());
2696       // If we overflow, then we do not want to restart. We instead
2697       // want to abort remark and do concurrent marking again.
2698       task->record_end_time();
2699     }
2700   }
2701 
2702   CMRemarkTask(ConcurrentMark* cm, int active_workers, bool is_serial) :
2703     AbstractGangTask("Par Remark"), _cm(cm), _is_serial(is_serial) {
2704     _cm->terminator()->reset_for_reuse(active_workers);
2705   }
2706 };
2707 
2708 void ConcurrentMark::checkpointRootsFinalWork() {
2709   ResourceMark rm;
2710   HandleMark   hm;
2711   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2712 
2713   G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer());
2714 
2715   g1h->ensure_parsability(false);
2716 
2717   if (G1CollectedHeap::use_parallel_gc_threads()) {
2718     G1CollectedHeap::StrongRootsScope srs(g1h);
2719     // this is remark, so we'll use up all active threads
2720     uint active_workers = g1h->workers()->active_workers();
2721     if (active_workers == 0) {
2722       assert(active_workers > 0, "Should have been set earlier");
2723       active_workers = (uint) ParallelGCThreads;
2724       g1h->workers()->set_active_workers(active_workers);
2725     }
2726     set_concurrency_and_phase(active_workers, false /* concurrent */);
2727     // Leave _parallel_marking_threads at its
2728     // value originally calculated in the ConcurrentMark
2729     // constructor and pass values of the active workers
2730     // through the gang in the task.
2731 
2732     CMRemarkTask remarkTask(this, active_workers, false /* is_serial */);
2733     // We will start all available threads, even if we decide that the
2734     // active_workers will be fewer. The extra ones will just bail out


3524     assert(_nextMarkBitMap->isMarked(addr), "invariant");
3525     assert( addr < _cm->finger(), "invariant");
3526 
3527     statsOnly( _task->increase_objs_found_on_bitmap() );
3528     assert(addr >= _task->finger(), "invariant");
3529 
3530     // We move that task's local finger along.
3531     _task->move_finger_to(addr);
3532 
3533     _task->scan_object(oop(addr));
3534     // we only partially drain the local queue and global stack
3535     _task->drain_local_queue(true);
3536     _task->drain_global_stack(true);
3537 
3538     // if the has_aborted flag has been raised, we need to bail out of
3539     // the iteration
3540     return !_task->has_aborted();
3541   }
3542 };
3543 
3544 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3545                                ConcurrentMark* cm,
3546                                CMTask* task)
3547   : _g1h(g1h), _cm(cm), _task(task) {
3548   assert(_ref_processor == NULL, "should be initialized to NULL");
3549 
3550   if (G1UseConcMarkReferenceProcessing) {
3551     _ref_processor = g1h->ref_processor_cm();
3552     assert(_ref_processor != NULL, "should not be NULL");
3553   }
3554 }
3555 
3556 void CMTask::setup_for_region(HeapRegion* hr) {
3557   assert(hr != NULL,
3558         "claim_region() should have filtered out NULL regions");
3559   assert(!hr->continuesHumongous(),
3560         "claim_region() should have filtered out continues humongous regions");
3561 
3562   if (_cm->verbose_low()) {
3563     gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,


3985 
3986   // This keeps claiming and applying the closure to completed buffers
3987   // until we run out of buffers or we need to abort.
3988   if (G1CollectedHeap::use_parallel_gc_threads()) {
3989     while (!has_aborted() &&
3990            satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) {
3991       if (_cm->verbose_medium()) {
3992         gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3993       }
3994       statsOnly( ++_satb_buffers_processed );
3995       regular_clock_call();
3996     }
3997   } else {
3998     while (!has_aborted() &&
3999            satb_mq_set.apply_closure_to_completed_buffer()) {
4000       if (_cm->verbose_medium()) {
4001         gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
4002       }
4003       statsOnly( ++_satb_buffers_processed );
4004       regular_clock_call();
4005     }
4006   }
4007 
4008   _draining_satb_buffers = false;
4009 
4010   assert(has_aborted() ||
4011          concurrent() ||
4012          satb_mq_set.completed_buffers_num() == 0, "invariant");
4013 
4014   if (G1CollectedHeap::use_parallel_gc_threads()) {
4015     satb_mq_set.set_par_closure(_worker_id, NULL);
4016   } else {
4017     satb_mq_set.set_closure(NULL);
4018   }
4019 
4020   // again, this was a potentially expensive operation, decrease the
4021   // limits to get the regular clock call early
4022   decrease_limits();
4023 }
4024