/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/parallelCleaning.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.hpp"
#include "gc/shenandoah/shenandoah_specialized_oop_closures.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/oop.inline.hpp"

class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
  SCMObjToScanQueue* _queue;
  ShenandoahHeap* _heap;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, RESOLVE, false>(p, _heap, _queue, NULL);
  }

public:
  ShenandoahInitMarkRootsClosure(SCMObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};
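
// Usage sketch (illustrative only): the root processor drives a closure like
// the one above over every root slot. `root_slot` below is a hypothetical
// oop* standing in for a slot supplied by ShenandoahRootProcessor:
//
//   SCMObjToScanQueue* q = heap->concurrentMark()->get_queue(worker_id);
//   ShenandoahInitMarkRootsClosure mark_cl(q);
//   mark_cl.do_oop(root_slot);  // marks through the ref, pushing the object on q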

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(SCMObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataAwareOopClosure(rp),
  _queue(q),
  _heap(ShenandoahHeap::heap()),
  _conn_matrix(ShenandoahHeap::heap()->connection_matrix())
{
}

class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  bool _process_refs;
public:
  ShenandoahInitMarkRootsTask(ShenandoahRootProcessor* rp, bool process_refs) :
    AbstractGangTask("Shenandoah init mark roots task"),
    _rp(rp),
    _process_refs(process_refs) {
  }

  void work(uint worker_id) {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    SCMObjToScanQueueSet* queues = heap->concurrentMark()->task_queues();
    assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %u", worker_id);

    SCMObjToScanQueue* q = queues->queue(worker_id);
    ShenandoahInitMarkRootsClosure mark_cl(q);
    CLDToOopClosure cldCl(&mark_cl);
    MarkingCodeBlobClosure blobsCl(&mark_cl, ! CodeBlobToOopClosure::FixRelocations);

    // The rationale for selecting the roots to scan is as follows:
    //   a. With unload_classes = true, we only want to scan the actual strong roots from the
    //      code cache. This allows us to identify the dead classes, unload them, *and*
    //      invalidate the relevant code cache blobs. This can only be done together with
    //      class unloading.
    //   b. With unload_classes = false, we have to nominally retain all the references from the
    //      code cache, because generated code may embed classes/oops that we would never
    //      visit during marking. Without the code cache invalidation of (a), we would risk
    //      executing such a code cache blob, and crashing.
    //   c. With ShenandoahConcurrentCodeRoots, we avoid scanning the entire code cache here,
    //      and instead do that in the concurrent phase under the relevant lock. This saves
    //      init mark pause time.

    ResourceMark m;
    if (heap->concurrentMark()->unload_classes()) {
      _rp->process_strong_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, &blobsCl, worker_id);
    } else {
      if (ShenandoahConcurrentCodeRoots) {
        CodeBlobClosure* code_blobs;
#ifdef ASSERT
        AssertToSpaceClosure assert_to_space_oops;
        CodeBlobToOopClosure assert_to_space(&assert_to_space_oops,
                                             !CodeBlobToOopClosure::FixRelocations);
        code_blobs = &assert_to_space;
#else
        code_blobs = NULL;
#endif
        _rp->process_all_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, code_blobs, worker_id);
      } else {
        _rp->process_all_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, &blobsCl, worker_id);
      }
    }
  }
};

class ShenandoahUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
public:
  ShenandoahUpdateRootsTask(ShenandoahRootProcessor* rp) :
    AbstractGangTask("Shenandoah update roots task"),
    _rp(rp) {
  }

  void work(uint worker_id) {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    SCMUpdateRefsClosure cl;
    CLDToOopClosure cldCl(&cl);

    CodeBlobClosure* code_blobs;
#ifdef ASSERT
    AssertToSpaceClosure assert_to_space_oops;
    CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
    code_blobs = &assert_to_space;
#else
    code_blobs = NULL;
#endif
    _rp->process_all_roots(&cl, &cl, &cldCl, code_blobs, worker_id);
  }
};

class SCMConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ParallelTaskTerminator* _terminator;
  bool _update_refs;

public:
  SCMConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs) :
    AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator), _update_refs(update_refs) {
  }

  void work(uint worker_id) {
    SCMObjToScanQueue* q = _cm->get_queue(worker_id);
    jushort* live_data = _cm->get_liveness(worker_id);
    ReferenceProcessor* rp;
    if (_cm->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
    } else {
      rp = NULL;
    }
    if (ShenandoahConcurrentCodeRoots && _cm->claim_codecache()) {
      if (! _cm->unload_classes()) {
        ShenandoahMarkResolveRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, ! CodeBlobToOopClosure::FixRelocations);
        MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        CodeCache::blobs_do(&blobs);
      }
    }

    _cm->mark_loop(worker_id, _terminator, rp,
                   true, // cancellable
                   true, // drain SATBs as we go
                   true, // count liveness
                   _cm->unload_classes(),
                   _update_refs,
                   UseShenandoahMatrix);
  }
};

class SCMFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ParallelTaskTerminator* _terminator;
  bool _update_refs;
  bool _count_live;
  bool _unload_classes;

public:
  SCMFinalMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs, bool count_live, bool unload_classes) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _update_refs(update_refs), _count_live(count_live), _unload_classes(unload_classes) {
  }

  void work(uint worker_id) {
    // First, drain the remaining SATB buffers. This is not strictly necessary
    // for mark-compact, but since it requires a StrongRootsScope around the
    // task, we need to claim the threads anyway, and performance-wise it
    // doesn't really matter. It adds about 1ms to a full GC.
    _cm->drain_satb_buffers(worker_id, true);

    ReferenceProcessor* rp;
    if (_cm->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
    } else {
      rp = NULL;
    }

    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   false, // do not drain SATBs, already drained
                   _count_live,
                   _unload_classes,
                   _update_refs,
                   UseShenandoahMatrix);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};

void ShenandoahConcurrentMark::mark_roots() {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ClassLoaderDataGraph::clear_claimed_marks();
  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();

  assert(nworkers <= task_queues()->size(), "Just check");

  ShenandoahRootProcessor root_proc(heap, nworkers, ShenandoahCollectorPolicy::scan_thread_roots);
  TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
  task_queues()->reserve(nworkers);

  ShenandoahInitMarkRootsTask mark_roots(&root_proc, process_references());
  workers->run_task(&mark_roots);
  if (ShenandoahConcurrentCodeRoots) {
    clear_claim_codecache();
  }
}

void ShenandoahConcurrentMark::init_mark_roots() {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Set up ref processing and class unloading.
  ShenandoahCollectorPolicy* policy = heap->shenandoahPolicy();
  set_process_references(policy->process_references());
  set_unload_classes(policy->unload_classes());

  mark_roots();
}

void ShenandoahConcurrentMark::update_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ClassLoaderDataGraph::clear_claimed_marks();
  uint nworkers = heap->workers()->active_workers();

  ShenandoahRootProcessor root_proc(heap, nworkers, ShenandoahCollectorPolicy::update_thread_roots);
  ShenandoahUpdateRootsTask update_roots(&root_proc);
  heap->workers()->run_task(&update_roots);
}

void ShenandoahConcurrentMark::final_update_roots() {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

  COMPILER2_PRESENT(DerivedPointerTable::clear());

  update_roots();

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}

void ShenandoahConcurrentMark::initialize(uint workers) {
  _heap = ShenandoahHeap::heap();

  uint num_queues = MAX2(workers, 1U);

  _task_queues = new SCMObjToScanQueueSet((int) num_queues);

  for (uint i = 0; i < num_queues; ++i) {
    SCMObjToScanQueue* task_queue = new SCMObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }
  _process_references = false;
  _unload_classes = false;
  _claimed_codecache = 0;

  JavaThread::satb_mark_queue_set().set_buffer_size(ShenandoahSATBBufferSize);

  size_t max_regions = ShenandoahHeap::heap()->max_regions();
  _liveness_local = NEW_C_HEAP_ARRAY(jushort*, workers, mtGC);
  for (uint worker = 0; worker < workers; worker++) {
    _liveness_local[worker] = NEW_C_HEAP_ARRAY(jushort, max_regions, mtGC);
  }
}
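
// Liveness accounting sketch: _liveness_local holds one jushort counter per heap
// region, per worker. A rough outline of how mark_loop_prework() (below) uses a
// worker's slice; `region_index` and `live_words` are illustrative names only:
//
//   jushort* ld = _liveness_local[worker_id];
//   Copy::fill_to_bytes(ld, max_regions * sizeof(jushort));  // zeroed before marking
//   ld[region_index] += live_words;                          // accumulated during marking
//   region->increase_live_data_words(ld[region_index]);      // flushed when done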

void ShenandoahConcurrentMark::mark_from_roots() {
  ShenandoahHeap* sh = (ShenandoahHeap *) Universe::heap();
  WorkGang* workers = sh->workers();
  uint nworkers = workers->active_workers();

  bool update_refs = sh->need_update_refs();

  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::conc_mark);

  if (process_references()) {
    ReferenceProcessor* rp = sh->ref_processor();
    rp->set_active_mt_degree(nworkers);

    // enable ("weak") refs discovery
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(sh->is_full_gc_in_progress()); // snapshot the soft ref policy to be used in this cycle
  }

  task_queues()->reserve(nworkers);

  if (UseShenandoahOWST) {
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    SCMConcurrentMarkingTask markingTask = SCMConcurrentMarkingTask(this, &terminator, update_refs);
    workers->run_task(&markingTask);
  } else {
    ParallelTaskTerminator terminator(nworkers, task_queues());
    SCMConcurrentMarkingTask markingTask = SCMConcurrentMarkingTask(this, &terminator, update_refs);
    workers->run_task(&markingTask);
  }

  assert(task_queues()->is_empty() || sh->cancelled_concgc(), "Should be empty when not cancelled");
  if (! sh->cancelled_concgc()) {
    TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
  }

  TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());

  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::conc_mark);
}

void ShenandoahConcurrentMark::finish_mark_from_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

  IsGCActiveMark is_active;

  ShenandoahHeap* sh = (ShenandoahHeap *) Universe::heap();

  TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());

  shared_finish_mark_from_roots(/* full_gc = */ false);

  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::update_roots);
  if (sh->need_update_refs()) {
    final_update_roots();
  }
  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::update_roots);

  TASKQUEUE_STATS_ONLY(print_taskqueue_stats());

#ifdef ASSERT
  verify_roots();

  if (ShenandoahDumpHeapAfterConcurrentMark) {
    sh->ensure_parsability(false);
    sh->print_all_refs("post-mark");
  }
#endif
}

class ResetRecentlyAllocated : public ShenandoahHeapRegionClosure {
public:
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->set_recently_allocated(false);
    return false;
  }
};

void ShenandoahConcurrentMark::shared_finish_mark_from_roots(bool full_gc) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* sh = ShenandoahHeap::heap();
  ShenandoahCollectorPolicy* policy = sh->shenandoahPolicy();

  uint nworkers = sh->workers()->active_workers();

  // Finish marking everything left in the queues from the previous steps.
  // This does two different things for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts with the task queues seeded by the initial
  //   root scan, and completes the closure, thus marking through all live objects.
  // The implementation is the same, so it's shared here.
  {
    policy->record_phase_start(full_gc ?
                               ShenandoahCollectorPolicy::full_gc_mark_drain_queues :
                               ShenandoahCollectorPolicy::drain_satb);
    bool count_live = !(ShenandoahNoLivenessFullGC && full_gc); // we do not need liveness data for full GC
    task_queues()->reserve(nworkers);

    StrongRootsScope scope(nworkers);
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, task_queues());
      SCMFinalMarkingTask task(this, &terminator, sh->need_update_refs(), count_live, unload_classes());
      sh->workers()->run_task(&task);
    } else {
      ParallelTaskTerminator terminator(nworkers, task_queues());
      SCMFinalMarkingTask task(this, &terminator, sh->need_update_refs(), count_live, unload_classes());
      sh->workers()->run_task(&task);
    }
    policy->record_phase_end(full_gc ?
                             ShenandoahCollectorPolicy::full_gc_mark_drain_queues :
                             ShenandoahCollectorPolicy::drain_satb);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // When we're done marking everything, we process weak references.
  policy->record_phase_start(full_gc ?
                             ShenandoahCollectorPolicy::full_gc_mark_weakrefs :
                             ShenandoahCollectorPolicy::weakrefs);
  if (process_references()) {
    weak_refs_work();
  }
  policy->record_phase_end(full_gc ?
                           ShenandoahCollectorPolicy::full_gc_mark_weakrefs :
                           ShenandoahCollectorPolicy::weakrefs);

  // And finally finish class unloading
  policy->record_phase_start(full_gc ?
                             ShenandoahCollectorPolicy::full_gc_mark_class_unloading :
                             ShenandoahCollectorPolicy::class_unloading);
  if (unload_classes()) {
    ShenandoahForwardedIsAliveClosure is_alive;
    // Unload classes and purge SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(&is_alive, false);
    ParallelCleaningTask unlink_task(&is_alive, true, true, nworkers, purged_class);
    sh->workers()->run_task(&unlink_task);
    ClassLoaderDataGraph::purge();
  }

  // Marking is finished. Recently allocated regions are no longer considered recent.
  {
    ResetRecentlyAllocated cl;
    sh->heap_region_iterate(&cl);
  }

  policy->record_phase_end(full_gc ?
                           ShenandoahCollectorPolicy::full_gc_mark_class_unloading :
                           ShenandoahCollectorPolicy::class_unloading);

  assert(task_queues()->is_empty(), "Should be empty");
}

#ifdef ASSERT
template <class T>
void ShenandoahVerifyRootsClosure1::do_oop_work(T* p) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    if (! oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj))) {
      tty->print_cr("from-space marked: %s, to-space marked: %s, unload_classes: %s",
                    BOOL_TO_STR(heap->is_marked_next(obj)),
                    BOOL_TO_STR(heap->is_marked_next(ShenandoahBarrierSet::resolve_oop_static_not_null(obj))),
                    BOOL_TO_STR(heap->concurrentMark()->unload_classes()));
    }
    guarantee(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "oop must not be forwarded");
    guarantee(heap->is_marked_next(obj), "oop must be marked");
  }
}

void ShenandoahVerifyRootsClosure1::do_oop(oop* p) {
  do_oop_work(p);
}

void ShenandoahVerifyRootsClosure1::do_oop(narrowOop* p) {
  do_oop_work(p);
}

void ShenandoahConcurrentMark::verify_roots() {
  ShenandoahVerifyRootsClosure1 cl;
  CodeBlobToOopClosure blobsCl(&cl, false);
  CLDToOopClosure cldCl(&cl);
  ClassLoaderDataGraph::clear_claimed_marks();
  ShenandoahRootProcessor rp(ShenandoahHeap::heap(), 1);
  rp.process_all_roots(&cl, &cl, &cldCl, &blobsCl, 0);
}
#endif

class ShenandoahSATBThreadsClosure : public ThreadClosure {
  ShenandoahSATBBufferClosure* _satb_cl;
  int _thread_parity;

 public:
  ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
    _satb_cl(satb_cl),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;
        jt->satb_mark_queue().apply_closure_and_empty(_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
      }
    }
  }
};

void ShenandoahConcurrentMark::drain_satb_buffers(uint worker_id, bool remark) {
  SCMObjToScanQueue* q = get_queue(worker_id);
  ShenandoahSATBBufferClosure cl(q);

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  while (satb_mq_set.apply_closure_to_completed_buffer(&cl));

  if (remark) {
    ShenandoahSATBThreadsClosure tc(&cl);
    Threads::threads_do(&tc);
  }
}
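
// Usage sketch: during the concurrent phase, workers drain only the completed
// SATB buffers (remark == false, via mark_loop); at the final-mark pause,
// SCMFinalMarkingTask additionally flushes the per-thread, partially filled
// buffers:
//
//   _cm->drain_satb_buffers(worker_id, true /* remark */);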

#if TASKQUEUE_STATS
void ShenandoahConcurrentMark::print_taskqueue_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ShenandoahConcurrentMark::print_taskqueue_stats() const {
  if (!log_develop_is_enabled(Trace, gc, task, stats)) {
    return;
  }
  Log(gc, task, stats) log;
  ResourceMark rm;
  outputStream* st = log.trace_stream();
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  const int n = _task_queues->size();
  for (int i = 0; i < n; ++i) {
    st->print(INT32_FORMAT_W(3), i);
    _task_queues->queue(i)->stats.print(st);
    st->cr();
    totals += _task_queues->queue(i)->stats;
  }
  st->print("tot "); totals.print(st); st->cr();
  DEBUG_ONLY(totals.verify());
}

void ShenandoahConcurrentMark::reset_taskqueue_stats() {
  const int n = task_queues()->size();
  for (int i = 0; i < n; ++i) {
    task_queues()->queue(i)->stats.reset();
  }
}
#endif // TASKQUEUE_STATS

// Weak Reference Closures
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ParallelTaskTerminator* _terminator;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id, ParallelTaskTerminator* t):
    _worker_id(worker_id),
    _terminator(t) {
  }

  void do_void() {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrentMark();
    ReferenceProcessor* rp;
    if (scm->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
    } else {
      rp = NULL;
    }

    scm->mark_loop(_worker_id, _terminator, rp,
                   false, // not cancellable
                   false, // do not drain SATBs
                   true,  // count liveness
                   scm->unload_classes(),
                   sh->need_update_refs(),
                   UseShenandoahMatrix);
  }
};

class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
  SCMObjToScanQueue* _queue;
  ShenandoahHeap* _heap;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE, false>(p, _heap, _queue, NULL);
  }

public:
  ShenandoahCMKeepAliveClosure(SCMObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
  SCMObjToScanQueue* _queue;
  ShenandoahHeap* _heap;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, false>(p, _heap, _queue, NULL);
  }

public:
  ShenandoahCMKeepAliveUpdateClosure(SCMObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

class ShenandoahCMKeepAliveMatrixClosure : public OopClosure {
private:
  SCMObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahConnectionMatrix* _conn_matrix;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE, true>(p, _heap, _queue, _conn_matrix);
  }

public:
  ShenandoahCMKeepAliveMatrixClosure(SCMObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap()),
    _conn_matrix(ShenandoahHeap::heap()->connection_matrix()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

class ShenandoahCMKeepAliveUpdateMatrixClosure : public OopClosure {
private:
  SCMObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahConnectionMatrix* _conn_matrix;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, true>(p, _heap, _queue, _conn_matrix);
  }

public:
  ShenandoahCMKeepAliveUpdateMatrixClosure(SCMObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap()),
    _conn_matrix(ShenandoahHeap::heap()->connection_matrix()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};
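
// The four keep-alive closures above form a 2x2 matrix over
// mark_through_ref<T, UPDATE_REFS_MODE, UPDATE_MATRIX>:
//
//                     matrix off (false)                   matrix on (true)
//   NONE   (no upd)   ShenandoahCMKeepAliveClosure         ShenandoahCMKeepAliveMatrixClosure
//   SIMPLE (update)   ShenandoahCMKeepAliveUpdateClosure   ShenandoahCMKeepAliveUpdateMatrixClosure
//
// ShenandoahRefProcTaskProxy::work() below selects one based on
// UseShenandoahMatrix and heap->need_update_refs().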

class ShenandoahRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ParallelTaskTerminator* _terminator;

public:
  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             ParallelTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahForwardedIsAliveClosure is_alive;
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    if (UseShenandoahMatrix) {
      if (heap->need_update_refs()) {
        ShenandoahCMKeepAliveUpdateMatrixClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
        _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
      } else {
        ShenandoahCMKeepAliveMatrixClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
        _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
      }
    } else {
      if (heap->need_update_refs()) {
        ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
        _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
      } else {
        ShenandoahCMKeepAliveClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
        _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
      }
    }
  }
};

class ShenandoahRefEnqueueTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::EnqueueTask& _enqueue_task;

public:
  ShenandoahRefEnqueueTaskProxy(AbstractRefProcTaskExecutor::EnqueueTask& enqueue_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enqueue_task(enqueue_task) {
  }

  void work(uint worker_id) {
    _enqueue_task.work(worker_id);
  }
};

class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahRefProcTaskExecutor(WorkGang* workers) :
    _workers(workers) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task) {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* cm = heap->concurrentMark();
    uint nworkers = _workers->active_workers();
    cm->task_queues()->reserve(nworkers);
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, cm->task_queues());
      ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
      _workers->run_task(&proc_task_proxy);
    } else {
      ParallelTaskTerminator terminator(nworkers, cm->task_queues());
      ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
      _workers->run_task(&proc_task_proxy);
    }
  }

  void execute(EnqueueTask& task) {
    ShenandoahRefEnqueueTaskProxy enqueue_task_proxy(task);
    _workers->run_task(&enqueue_task_proxy);
  }
};

void ShenandoahConcurrentMark::weak_refs_work() {
  assert(process_references(), "sanity");
  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
  ReferenceProcessor* rp = sh->ref_processor();
  WorkGang* workers = sh->workers();
  uint nworkers = workers->active_workers();

  // Setup collector policy for softref cleaning.
  bool clear_soft_refs = sh->collector_policy()->use_should_clear_all_soft_refs(true /* bogus arg*/);
  log_develop_debug(gc, ref)("clearing soft refs: %s", BOOL_TO_STR(clear_soft_refs));
  rp->setup_policy(clear_soft_refs);
  rp->set_active_mt_degree(nworkers);

  uint serial_worker_id = 0;
  ShenandoahForwardedIsAliveClosure is_alive;

  assert(task_queues()->is_empty(), "Should be empty");

  ParallelTaskTerminator terminator(1, task_queues());
  ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator);
  ShenandoahRefProcTaskExecutor executor(workers);

  log_develop_trace(gc, ref)("start processing references");

  if (sh->need_update_refs()) {
    ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      NULL);
  } else {
    ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      NULL);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  log_develop_trace(gc, ref)("finished processing references");
  log_develop_trace(gc, ref)("start enqueueing references");

  rp->enqueue_discovered_references(&executor);

  log_develop_trace(gc, ref)("finished enqueueing references");

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}

void ShenandoahConcurrentMark::cancel() {
  ShenandoahHeap* sh = ShenandoahHeap::heap();

  // Cancel weak-ref discovery.
  if (process_references()) {
    ReferenceProcessor* rp = sh->ref_processor();
    rp->abandon_partial_discovery();
    rp->disable_discovery();
  }

  // Clean up marking stacks.
  SCMObjToScanQueueSet* queues = task_queues();
  queues->clear();

  // Cancel SATB buffers.
  JavaThread::satb_mark_queue_set().abandon_partial_marking();
}

SCMObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %u", worker_id);
  return _task_queues->queue(worker_id);
}

void ShenandoahConcurrentMark::clear_queue(SCMObjToScanQueue* q) {
  q->set_empty();
  q->overflow_stack()->clear();
  q->clear_buffer();
}

template <bool CANCELLABLE, bool DRAIN_SATB, bool COUNT_LIVENESS, bool CLASS_UNLOAD, bool UPDATE_REFS, bool UPDATE_MATRIX>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, ParallelTaskTerminator* t, ReferenceProcessor* rp) {
  SCMObjToScanQueue* q = get_queue(w);

  jushort* ld;
  if (COUNT_LIVENESS) {
    ld = get_liveness(w);
    Copy::fill_to_bytes(ld, _heap->max_regions() * sizeof(jushort));
  } else {
    ld = NULL;
  }

  // TODO: We could clean this up if we figured out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (UPDATE_MATRIX) {
    if (CLASS_UNLOAD) {
      if (UPDATE_REFS) {
        ShenandoahMarkUpdateRefsMetadataMatrixClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataMatrixClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataMatrixClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataMatrixClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      }
    } else {
      if (UPDATE_REFS) {
        ShenandoahMarkUpdateRefsMatrixClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMatrixClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMatrixClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMatrixClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      }
    }
  } else {
    if (CLASS_UNLOAD) {
      if (UPDATE_REFS) {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      }
    } else {
      if (UPDATE_REFS) {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      }
    }
  }
  if (COUNT_LIVENESS) {
    for (uint i = 0; i < _heap->max_regions(); i++) {
      ShenandoahHeapRegion* r = _heap->regions()->get(i);
      if (r != NULL) {
        jushort live = ld[i];
        if (live > 0) {
          r->increase_live_data_words(live);
        }
      }
    }
  }
}
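
// Dispatch sketch: the boolean template parameters select one of the eight
// specialized closures above. For example, the concurrent phase
// (SCMConcurrentMarkingTask) calls
//
//   mark_loop(worker_id, terminator, rp,
//             true,              // CANCELLABLE
//             true,              // DRAIN_SATB
//             true,              // COUNT_LIVENESS
//             unload_classes(),  // CLASS_UNLOAD
//             update_refs,       // UPDATE_REFS
//             UseShenandoahMatrix);
//
// so with class unloading and the matrix both off, marking runs either
// ShenandoahMarkUpdateRefsClosure (UPDATE_REFS) or ShenandoahMarkRefsClosure.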

template <class T, bool CANCELLABLE, bool DRAIN_SATB, bool COUNT_LIVENESS>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator* terminator) {
  int seed = 17;
  uint stride = CANCELLABLE ? ShenandoahMarkLoopStride : 1;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  SCMObjToScanQueueSet* queues = task_queues();
  SCMObjToScanQueue* q;
  SCMTask t;

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
    "Need to reserve proper number of queues");

  q = queues->claim_next();
  while (q != NULL) {
    if (CANCELLABLE && heap->cancelled_concgc()) {
      ShenandoahCancelledTerminatorTerminator tt;
      while (!terminator->offer_termination(&tt));
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (try_queue(q, t)) {
        do_task<T, COUNT_LIVENESS>(q, cl, live_data, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  q = get_queue(worker_id);

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->cancelled_concgc()) {
      ShenandoahCancelledTerminatorTerminator tt;
      while (!terminator->offer_termination(&tt));
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (try_queue(q, t) ||
              (DRAIN_SATB && try_draining_satb_buffer(q, t)) ||
              queues->steal(worker_id, &seed, t)) {
        do_task<T, COUNT_LIVENESS>(q, cl, live_data, &t);
      } else {
        if (terminator->offer_termination()) return;
      }
    }
  }
}
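
// Per-iteration task sources in the normal loop above, in priority order:
//   1. try_queue(q, t)                    -- pop from the worker's own queue
//   2. try_draining_satb_buffer(q, t)     -- refill from a completed SATB buffer (if DRAIN_SATB)
//   3. queues->steal(worker_id, &seed, t) -- steal from another worker's queue
// Only when all three fail does the worker offer termination.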

void ShenandoahConcurrentMark::set_process_references(bool pr) {
  _process_references = pr;
}

bool ShenandoahConcurrentMark::process_references() const {
  return _process_references;
}

void ShenandoahConcurrentMark::set_unload_classes(bool uc) {
  _unload_classes = uc;
}

bool ShenandoahConcurrentMark::unload_classes() const {
  return _unload_classes;
}

bool ShenandoahConcurrentMark::claim_codecache() {
  assert(ShenandoahConcurrentCodeRoots, "must not be called otherwise");
  jbyte old = Atomic::cmpxchg(1, &_claimed_codecache, 0);
  return old == 0;
}
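
// Claim protocol sketch: with ShenandoahConcurrentCodeRoots enabled, exactly one
// marking worker wins the CAS above and scans the code cache; the claim is reset
// by clear_claim_codecache() at init mark (see mark_roots()). Usage as in
// SCMConcurrentMarkingTask::work():
//
//   if (ShenandoahConcurrentCodeRoots && _cm->claim_codecache()) {
//     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
//     CodeCache::blobs_do(&blobs);
//   }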

void ShenandoahConcurrentMark::clear_claim_codecache() {
  assert(ShenandoahConcurrentCodeRoots, "must not be called otherwise");
  _claimed_codecache = 0;
}

jushort* ShenandoahConcurrentMark::get_liveness(uint worker_id) {
  return _liveness_local[worker_id];
}

// Generate Shenandoah specialized oop_oop_iterate functions.
SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_SHENANDOAH(ALL_KLASS_OOP_OOP_ITERATE_DEFN)