/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/parallelCleaning.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.hpp"
#include "gc/shenandoah/shenandoah_specialized_oop_closures.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/oop.inline.hpp"

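// Marks objects reachable from the roots discovered at the init-mark pause:
// each discovered reference is pushed onto the worker-local mark queue via
// mark_through_ref. RESOLVE mode presumably reads through the Brooks
// forwarding pointer without updating the root location itself.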
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
  SCMObjToScanQueue* _queue;
  ShenandoahHeap* _heap;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, RESOLVE>(p, _heap, _queue);
  }

public:
  ShenandoahInitMarkRootsClosure(SCMObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(SCMObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataAwareOopClosure(rp),
  _queue(q),
  _heap(ShenandoahHeap::heap())
{
}

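// Root scanning task for the init-mark pause: each worker claims its reserved
// queue and scans a disjoint subset of the roots, seeding the task queues for
// the concurrent marking phase.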
class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  bool _process_refs;
public:
  ShenandoahInitMarkRootsTask(ShenandoahRootProcessor* rp, bool process_refs) :
    AbstractGangTask("Shenandoah init mark roots task"),
    _rp(rp),
    _process_refs(process_refs) {
  }

  void work(uint worker_id) {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    SCMObjToScanQueueSet* queues = heap->concurrentMark()->task_queues();
    assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %u", worker_id);

    SCMObjToScanQueue* q = queues->queue(worker_id);
    ShenandoahInitMarkRootsClosure mark_cl(q);
    CLDToOopClosure cldCl(&mark_cl);
    MarkingCodeBlobClosure blobsCl(&mark_cl, ! CodeBlobToOopClosure::FixRelocations);

    // The rationale for selecting the roots to scan is as follows:
    //   a. With unload_classes = true, we only want to scan the actual strong roots from the
    //      code cache. This will allow us to identify the dead classes, unload them, *and*
    //      invalidate the relevant code cache blobs. This can only be done together with
    //      class unloading.
    //   b. With unload_classes = false, we have to nominally retain all the references from the
    //      code cache, because the generated code may embed classes/oops that we will never
    //      visit during marking. Without code cache invalidation, as in (a), we would risk
    //      executing such a code cache blob, and crashing.
    //   c. With ShenandoahConcurrentCodeRoots, we avoid scanning the entire code cache here,
    //      and instead do that in the concurrent phase under the relevant lock. This saves
    //      init mark pause time.

    ResourceMark m;
    if (heap->concurrentMark()->unload_classes()) {
      _rp->process_strong_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, &blobsCl, worker_id);
    } else {
      if (ShenandoahConcurrentCodeRoots) {
        CodeBlobClosure* code_blobs;
#ifdef ASSERT
        AssertToSpaceClosure assert_to_space_oops;
        CodeBlobToOopClosure assert_to_space(&assert_to_space_oops,
                                             !CodeBlobToOopClosure::FixRelocations);
        code_blobs = &assert_to_space;
#else
        code_blobs = NULL;
#endif
        _rp->process_all_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, code_blobs, worker_id);
      } else {
        _rp->process_all_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, &blobsCl, worker_id);
      }
    }
  }
};

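// Updates root references in place via SCMUpdateRefsClosure so that they
// point to the current (to-space) copies of their objects.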
class ShenandoahUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
public:
  ShenandoahUpdateRootsTask(ShenandoahRootProcessor* rp) :
    AbstractGangTask("Shenandoah update roots task"),
    _rp(rp) {
  }

  void work(uint worker_id) {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

    SCMUpdateRefsClosure cl;
    CLDToOopClosure cldCl(&cl);

    CodeBlobClosure* code_blobs;
#ifdef ASSERT
    AssertToSpaceClosure assert_to_space_oops;
    CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
    code_blobs = &assert_to_space;
#else
    code_blobs = NULL;
#endif
    _rp->process_all_roots(&cl, &cl, &cldCl, code_blobs, worker_id);
  }
};

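// Concurrent marking task: optionally scans the code cache first (when
// concurrent code root scanning is enabled and this worker wins the claim),
// then runs the main marking loop until the terminator agrees that all
// queues are drained.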
class SCMConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ParallelTaskTerminator* _terminator;
  bool _update_refs;

public:
  SCMConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs) :
    AbstractGangTask("Shenandoah Concurrent Marking"), _cm(cm), _terminator(terminator), _update_refs(update_refs) {
  }

  void work(uint worker_id) {
    SCMObjToScanQueue* q = _cm->get_queue(worker_id);
    jushort* live_data = _cm->get_liveness(worker_id);
    ReferenceProcessor* rp;
    if (_cm->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
    } else {
      rp = NULL;
    }
    if (ShenandoahConcurrentCodeRoots && _cm->claim_codecache()) {
      if (! _cm->unload_classes()) {
        ShenandoahMarkResolveRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, ! CodeBlobToOopClosure::FixRelocations);
        MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        CodeCache::blobs_do(&blobs);
      }
    }

    _cm->mark_loop(worker_id, _terminator, rp,
                   true,  // cancellable
                   true,  // drain SATBs as we go
                   true,  // count liveness
                   _cm->unload_classes(),
                   _update_refs);
  }
};

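// Final-mark task: drains the remaining SATB buffers, then finishes the
// marking loop. Runs at a safepoint, so it is not cancellable.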
class SCMFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ParallelTaskTerminator* _terminator;
  bool _update_refs;
  bool _count_live;
  bool _unload_classes;

public:
  SCMFinalMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs, bool count_live, bool unload_classes) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _update_refs(update_refs), _count_live(count_live), _unload_classes(unload_classes) {
  }

  void work(uint worker_id) {
    // First drain the remaining SATB buffers.
    // Note that this is not strictly necessary for mark-compact. But since
    // it requires a StrongRootsScope around the task, we need to claim the
    // threads, and performance-wise it doesn't really matter: it adds about
    // 1ms to a full GC.
    _cm->drain_satb_buffers(worker_id, true);

    ReferenceProcessor* rp;
    if (_cm->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
    } else {
      rp = NULL;
    }

    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   false, // do not drain SATBs, already drained
                   _count_live,
                   _unload_classes,
                   _update_refs);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};

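// Scans GC roots at the init-mark safepoint and seeds the worker task queues
// for concurrent marking.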
void ShenandoahConcurrentMark::mark_roots() {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ClassLoaderDataGraph::clear_claimed_marks();
  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();

  assert(nworkers <= task_queues()->size(), "Must not have more workers than task queues");

  ShenandoahRootProcessor root_proc(heap, nworkers, ShenandoahCollectorPolicy::scan_thread_roots);
  TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
  task_queues()->reserve(nworkers);

  ShenandoahInitMarkRootsTask mark_roots(&root_proc, process_references());
  workers->run_task(&mark_roots);
  if (ShenandoahConcurrentCodeRoots) {
    clear_claim_codecache();
  }
}

void ShenandoahConcurrentMark::init_mark_roots() {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Set up ref processing and class unloading.
  ShenandoahCollectorPolicy* policy = heap->shenandoahPolicy();
  set_process_references(policy->process_references());
  set_unload_classes(policy->unload_classes());

  mark_roots();
}

void ShenandoahConcurrentMark::update_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ClassLoaderDataGraph::clear_claimed_marks();
  uint nworkers = heap->workers()->active_workers();

  ShenandoahRootProcessor root_proc(heap, nworkers, ShenandoahCollectorPolicy::update_thread_roots);
  ShenandoahUpdateRootsTask update_roots(&root_proc);
  heap->workers()->run_task(&update_roots);
}

void ShenandoahConcurrentMark::final_update_roots() {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

  COMPILER2_PRESENT(DerivedPointerTable::clear());

  update_roots();

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}

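// Sets up the per-worker task queues and liveness arrays. The liveness
// arrays accumulate live words per region during marking and are flushed
// into the regions at the end of the marking loop (see mark_loop_prework).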
void ShenandoahConcurrentMark::initialize(uint workers) {
  _heap = ShenandoahHeap::heap();

  uint num_queues = MAX2(workers, 1U);

  _task_queues = new SCMObjToScanQueueSet((int) num_queues);

  for (uint i = 0; i < num_queues; ++i) {
    SCMObjToScanQueue* task_queue = new SCMObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }
  _process_references = false;
  _unload_classes = false;
  _claimed_codecache = 0;

  JavaThread::satb_mark_queue_set().set_buffer_size(ShenandoahSATBBufferSize);

  size_t max_regions = ShenandoahHeap::heap()->max_regions();
  _liveness_local = NEW_C_HEAP_ARRAY(jushort*, workers, mtGC);
  for (uint worker = 0; worker < workers; worker++) {
    _liveness_local[worker] = NEW_C_HEAP_ARRAY(jushort, max_regions, mtGC);
  }
}

void ShenandoahConcurrentMark::mark_from_roots() {
  ShenandoahHeap* sh = (ShenandoahHeap *) Universe::heap();
  WorkGang* workers = sh->workers();
  uint nworkers = workers->active_workers();

  bool update_refs = sh->need_update_refs();

  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::conc_mark);

  if (process_references()) {
    ReferenceProcessor* rp = sh->ref_processor();
    rp->set_active_mt_degree(nworkers);

    // enable ("weak") refs discovery
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(sh->is_full_gc_in_progress()); // snapshot the soft ref policy to be used in this cycle
  }

  task_queues()->reserve(nworkers);

  if (UseShenandoahOWST) {
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    SCMConcurrentMarkingTask markingTask(this, &terminator, update_refs);
    workers->run_task(&markingTask);
  } else {
    ParallelTaskTerminator terminator(nworkers, task_queues());
    SCMConcurrentMarkingTask markingTask(this, &terminator, update_refs);
    workers->run_task(&markingTask);
  }

  assert(task_queues()->is_empty() || sh->cancelled_concgc(), "Should be empty when not cancelled");
  if (! sh->cancelled_concgc()) {
    TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
  }

  TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());

  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::conc_mark);
}

void ShenandoahConcurrentMark::finish_mark_from_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

  IsGCActiveMark is_active;

  ShenandoahHeap* sh = (ShenandoahHeap *) Universe::heap();

  TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());

  shared_finish_mark_from_roots(/* full_gc = */ false);

  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::update_roots);
  if (sh->need_update_refs()) {
    final_update_roots();
  }
  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::update_roots);

  TASKQUEUE_STATS_ONLY(print_taskqueue_stats());

#ifdef ASSERT
  verify_roots();

  if (ShenandoahDumpHeapAfterConcurrentMark) {
    sh->ensure_parsability(false);
    sh->print_all_refs("post-mark");
  }
#endif
}

class ResetRecentlyAllocated : public ShenandoahHeapRegionClosure {
public:
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->set_recently_allocated(false);
    return false;
  }
};

void ShenandoahConcurrentMark::shared_finish_mark_from_roots(bool full_gc) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* sh = ShenandoahHeap::heap();
  ShenandoahCollectorPolicy* policy = sh->shenandoahPolicy();

  uint nworkers = sh->workers()->active_workers();

  // Finish marking everything that the previous steps left in the task queues.
  // This does two different things for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts out with the task queues seeded by the
  //   initial root scan, and completes the closure, thus marking through all
  //   live objects.
  // The implementation is the same, so it's shared here.
  {
    policy->record_phase_start(full_gc ?
                               ShenandoahCollectorPolicy::full_gc_mark_drain_queues :
                               ShenandoahCollectorPolicy::drain_satb);
    bool count_live = !(ShenandoahNoLivenessFullGC && full_gc); // we do not need liveness data for full GC
    task_queues()->reserve(nworkers);

    StrongRootsScope scope(nworkers);
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, task_queues());
      SCMFinalMarkingTask task(this, &terminator, sh->need_update_refs(), count_live, unload_classes());
      sh->workers()->run_task(&task);
    } else {
      ParallelTaskTerminator terminator(nworkers, task_queues());
      SCMFinalMarkingTask task(this, &terminator, sh->need_update_refs(), count_live, unload_classes());
      sh->workers()->run_task(&task);
    }
    policy->record_phase_end(full_gc ?
                             ShenandoahCollectorPolicy::full_gc_mark_drain_queues :
                             ShenandoahCollectorPolicy::drain_satb);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // When we're done marking everything, we process weak references.
  policy->record_phase_start(full_gc ?
                             ShenandoahCollectorPolicy::full_gc_mark_weakrefs :
                             ShenandoahCollectorPolicy::weakrefs);
  if (process_references()) {
    weak_refs_work();
  }
  policy->record_phase_end(full_gc ?
                           ShenandoahCollectorPolicy::full_gc_mark_weakrefs :
                           ShenandoahCollectorPolicy::weakrefs);

  // And finally finish class unloading.
  policy->record_phase_start(full_gc ?
                             ShenandoahCollectorPolicy::full_gc_mark_class_unloading :
                             ShenandoahCollectorPolicy::class_unloading);
  if (unload_classes()) {
    ShenandoahForwardedIsAliveClosure is_alive;
    // Unload classes and purge the SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(&is_alive, false);
    ParallelCleaningTask unlink_task(&is_alive, true, true, nworkers, purged_class);
    sh->workers()->run_task(&unlink_task);
    ClassLoaderDataGraph::purge();
  }

  // Marking is finished. All recently allocated regions are not recent anymore.
  {
    ResetRecentlyAllocated cl;
    sh->heap_region_iterate(&cl);
  }

  policy->record_phase_end(full_gc ?
                           ShenandoahCollectorPolicy::full_gc_mark_class_unloading :
                           ShenandoahCollectorPolicy::class_unloading);

  assert(task_queues()->is_empty(), "Should be empty");
}

#ifdef ASSERT
template <class T>
void ShenandoahVerifyRootsClosure1::do_oop_work(T* p) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    if (! oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj))) {
      tty->print_cr("from-space marked: %s, to-space marked: %s, unload_classes: %s",
                    BOOL_TO_STR(heap->is_marked_next(obj)),
                    BOOL_TO_STR(heap->is_marked_next(ShenandoahBarrierSet::resolve_oop_static_not_null(obj))),
                    BOOL_TO_STR(heap->concurrentMark()->unload_classes()));
    }
    guarantee(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "oop must not be forwarded");
    guarantee(heap->is_marked_next(obj), "oop must be marked");
  }
}

void ShenandoahVerifyRootsClosure1::do_oop(oop* p) {
  do_oop_work(p);
}

void ShenandoahVerifyRootsClosure1::do_oop(narrowOop* p) {
  do_oop_work(p);
}

void ShenandoahConcurrentMark::verify_roots() {
  ShenandoahVerifyRootsClosure1 cl;
  CodeBlobToOopClosure blobsCl(&cl, false);
  CLDToOopClosure cldCl(&cl);
  ClassLoaderDataGraph::clear_claimed_marks();
  ShenandoahRootProcessor rp(ShenandoahHeap::heap(), 1);
  rp.process_all_roots(&cl, &cl, &cldCl, &blobsCl, 0);
}
#endif

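// Applies the SATB buffer closure to the thread-local SATB queue of every
// Java thread, and to the shared queue on behalf of the VM thread. Claiming
// by parity ensures each thread's queue is processed by exactly one worker.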
class ShenandoahSATBThreadsClosure : public ThreadClosure {
  ShenandoahSATBBufferClosure* _satb_cl;
  int _thread_parity;

 public:
  ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
    _satb_cl(satb_cl),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;
        jt->satb_mark_queue().apply_closure_and_empty(_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
      }
    }
  }
};

void ShenandoahConcurrentMark::drain_satb_buffers(uint worker_id, bool remark) {
  SCMObjToScanQueue* q = get_queue(worker_id);
  ShenandoahSATBBufferClosure cl(q);

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  while (satb_mq_set.apply_closure_to_completed_buffer(&cl));

  if (remark) {
    ShenandoahSATBThreadsClosure tc(&cl);
    Threads::threads_do(&tc);
  }
}

#if TASKQUEUE_STATS
void ShenandoahConcurrentMark::print_taskqueue_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ShenandoahConcurrentMark::print_taskqueue_stats() const {
  if (!log_develop_is_enabled(Trace, gc, task, stats)) {
    return;
  }
  Log(gc, task, stats) log;
  ResourceMark rm;
  outputStream* st = log.trace_stream();
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  const int n = _task_queues->size();
  for (int i = 0; i < n; ++i) {
    st->print(INT32_FORMAT_W(3), i);
    _task_queues->queue(i)->stats.print(st);
    st->cr();
    totals += _task_queues->queue(i)->stats;
  }
  st->print("tot "); totals.print(st); st->cr();
  DEBUG_ONLY(totals.verify());
}

void ShenandoahConcurrentMark::reset_taskqueue_stats() {
  const int n = task_queues()->size();
  for (int i = 0; i < n; ++i) {
    task_queues()->queue(i)->stats.reset();
  }
}
#endif // TASKQUEUE_STATS

// Weak Reference Closures
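// Drains the marking queues as the "complete GC" step of reference
// processing: anything revived by the keep-alive closure is marked through
// here before the reference processor moves on.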
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ParallelTaskTerminator* _terminator;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id, ParallelTaskTerminator* t):
    _worker_id(worker_id),
    _terminator(t) {
  }

  void do_void() {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrentMark();
    ReferenceProcessor* rp;
    if (scm->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
    } else {
      rp = NULL;
    }

    scm->mark_loop(_worker_id, _terminator, rp,
                   false, // not cancellable
                   false, // do not drain SATBs
                   true,  // count liveness
                   scm->unload_classes(),
                   sh->need_update_refs());
  }
};

class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
  SCMObjToScanQueue* _queue;
  ShenandoahHeap* _heap;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE>(p, _heap, _queue);
  }

public:
  ShenandoahCMKeepAliveClosure(SCMObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
  SCMObjToScanQueue* _queue;
  ShenandoahHeap* _heap;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE>(p, _heap, _queue);
  }

public:
  ShenandoahCMKeepAliveUpdateClosure(SCMObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

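// Adapts a reference-processing ProcessTask to run on the Shenandoah work
// gang, choosing the keep-alive closure based on whether references still
// need updating to their to-space copies.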
class ShenandoahRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ParallelTaskTerminator* _terminator;

public:
  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             ParallelTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahForwardedIsAliveClosure is_alive;
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    if (heap->need_update_refs()) {
      ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahCMKeepAliveClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};

class ShenandoahRefEnqueueTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::EnqueueTask& _enqueue_task;

public:
  ShenandoahRefEnqueueTaskProxy(AbstractRefProcTaskExecutor::EnqueueTask& enqueue_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enqueue_task(enqueue_task) {
  }

  void work(uint worker_id) {
    _enqueue_task.work(worker_id);
  }
};

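// Lets the ReferenceProcessor run its parallel phases on Shenandoah's own
// work gang, reserving task queues so the drain closure can use them.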
class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahRefProcTaskExecutor(WorkGang* workers) :
    _workers(workers) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task) {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* cm = heap->concurrentMark();
    uint nworkers = _workers->active_workers();
    cm->task_queues()->reserve(nworkers);
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, cm->task_queues());
      ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
      _workers->run_task(&proc_task_proxy);
    } else {
      ParallelTaskTerminator terminator(nworkers, cm->task_queues());
      ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
      _workers->run_task(&proc_task_proxy);
    }
  }

  void execute(EnqueueTask& task) {
    ShenandoahRefEnqueueTaskProxy enqueue_task_proxy(task);
    _workers->run_task(&enqueue_task_proxy);
  }
};

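// Processes the weak references discovered during marking, then enqueues the
// cleared ones. The drain closure finishes marking everything that the
// keep-alive closures revive.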
void ShenandoahConcurrentMark::weak_refs_work() {
  assert(process_references(), "sanity");
  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
  ReferenceProcessor* rp = sh->ref_processor();
  WorkGang* workers = sh->workers();
  uint nworkers = workers->active_workers();

  // Set up the collector policy for softref cleaning.
  bool clear_soft_refs = sh->collector_policy()->use_should_clear_all_soft_refs(true /* bogus arg */);
  log_develop_debug(gc, ref)("clearing soft refs: %s", BOOL_TO_STR(clear_soft_refs));
  rp->setup_policy(clear_soft_refs);
  rp->set_active_mt_degree(nworkers);

  uint serial_worker_id = 0;
  ShenandoahForwardedIsAliveClosure is_alive;

  assert(task_queues()->is_empty(), "Should be empty");

  ParallelTaskTerminator terminator(1, task_queues());
  ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator);
  ShenandoahRefProcTaskExecutor executor(workers);

  log_develop_trace(gc, ref)("start processing references");

  if (sh->need_update_refs()) {
    ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      NULL);
  } else {
    ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      NULL);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  log_develop_trace(gc, ref)("finished processing references");
  log_develop_trace(gc, ref)("start enqueuing references");

  rp->enqueue_discovered_references(&executor);

  log_develop_trace(gc, ref)("finished enqueuing references");

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}

void ShenandoahConcurrentMark::cancel() {
  ShenandoahHeap* sh = ShenandoahHeap::heap();

  // Cancel weak-ref discovery.
  if (process_references()) {
    ReferenceProcessor* rp = sh->ref_processor();
    rp->abandon_partial_discovery();
    rp->disable_discovery();
  }

  // Clean up marking stacks.
  SCMObjToScanQueueSet* queues = task_queues();
  queues->clear();

  // Cancel SATB buffers.
  JavaThread::satb_mark_queue_set().abandon_partial_marking();
}

SCMObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %u", worker_id);
  return _task_queues->queue(worker_id);
}

void ShenandoahConcurrentMark::clear_queue(SCMObjToScanQueue *q) {
  q->set_empty();
  q->overflow_stack()->clear();
  q->clear_buffer();
}

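// Binds the runtime flags to template parameters so that the hot marking
// loop is specialized at compile time, with no per-oop branching on these
// modes. Per-worker liveness counts are flushed to the regions once the
// loop terminates.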
template <bool CANCELLABLE, bool DRAIN_SATB, bool COUNT_LIVENESS, bool CLASS_UNLOAD, bool UPDATE_REFS>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, ParallelTaskTerminator *t, ReferenceProcessor *rp) {
  SCMObjToScanQueue* q = get_queue(w);

  jushort* ld;
  if (COUNT_LIVENESS) {
    ld = get_liveness(w);
    Copy::fill_to_bytes(ld, _heap->max_regions() * sizeof(jushort));
  } else {
    ld = NULL;
  }

  // TODO: We can clean this up if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (CLASS_UNLOAD) {
    if (UPDATE_REFS) {
      ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
      mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
    } else {
      ShenandoahMarkRefsMetadataClosure cl(q, rp);
      mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
    }
  } else {
    if (UPDATE_REFS) {
      ShenandoahMarkUpdateRefsClosure cl(q, rp);
      mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
    } else {
      ShenandoahMarkRefsClosure cl(q, rp);
      mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
    }
  }
  if (COUNT_LIVENESS) {
    for (uint i = 0; i < _heap->max_regions(); i++) {
      ShenandoahHeapRegion* r = _heap->regions()->get(i);
      if (r != NULL) {
        jushort live = ld[i];
        if (live > 0) {
          r->increase_live_data_words(live);
        }
      }
    }
  }
}

template <class T, bool CANCELLABLE, bool DRAIN_SATB, bool COUNT_LIVENESS>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator *terminator) {
  int seed = 17;
  uint stride = CANCELLABLE ? ShenandoahMarkLoopStride : 1;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  SCMObjToScanQueueSet* queues = task_queues();
  SCMObjToScanQueue* q;
  SCMTask t;

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
    "Need to reserve proper number of queues");

  q = queues->claim_next();
  while (q != NULL) {
    if (CANCELLABLE && heap->cancelled_concgc()) {
      ShenandoahCancelledTerminatorTerminator tt;
      while (!terminator->offer_termination(&tt));
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (try_queue(q, t)) {
        do_task<T, COUNT_LIVENESS>(q, cl, live_data, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  q = get_queue(worker_id);

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->cancelled_concgc()) {
      ShenandoahCancelledTerminatorTerminator tt;
      while (!terminator->offer_termination(&tt));
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (try_queue(q, t) ||
              (DRAIN_SATB && try_draining_satb_buffer(q, t)) ||
              queues->steal(worker_id, &seed, t)) {
        do_task<T, COUNT_LIVENESS>(q, cl, live_data, &t);
      } else {
        if (terminator->offer_termination()) return;
      }
    }
  }
}

void ShenandoahConcurrentMark::set_process_references(bool pr) {
  _process_references = pr;
}

bool ShenandoahConcurrentMark::process_references() const {
  return _process_references;
}

void ShenandoahConcurrentMark::set_unload_classes(bool uc) {
  _unload_classes = uc;
}

bool ShenandoahConcurrentMark::unload_classes() const {
  return _unload_classes;
}

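// Single-winner claim on the code cache for concurrent code root scanning:
// the worker whose CAS flips the flag from 0 to 1 gets to scan it.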
bool ShenandoahConcurrentMark::claim_codecache() {
  assert(ShenandoahConcurrentCodeRoots, "must not be called otherwise");
  jbyte old = Atomic::cmpxchg(1, &_claimed_codecache, 0);
  return old == 0;
}

void ShenandoahConcurrentMark::clear_claim_codecache() {
  assert(ShenandoahConcurrentCodeRoots, "must not be called otherwise");
  _claimed_codecache = 0;
}

jushort* ShenandoahConcurrentMark::get_liveness(uint worker_id) {
  return _liveness_local[worker_id];
}

// Generate Shenandoah specialized oop_oop_iterate functions.
SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_SHENANDOAH(ALL_KLASS_OOP_OOP_ITERATE_DEFN)