/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/parallelCleaning.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.hpp"
#include "gc/shenandoah/shenandoah_specialized_oop_closures.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/oop.inline.hpp"

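// Marks the object referenced from a root slot and pushes it onto the
// worker-local mark queue. Uses the RESOLVE mode of mark_through_ref:
// forwarded objects are resolved through the barrier, but the root slot
// itself is not updated.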
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
  SCMObjToScanQueue* _queue;
  ShenandoahHeap* _heap;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, RESOLVE, false>(p, _heap, _queue, NULL);
  }

public:
  ShenandoahInitMarkRootsClosure(SCMObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(SCMObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataAwareOopClosure(rp),
  _queue(q),
  _heap(ShenandoahHeap::heap()),
  _conn_matrix(ShenandoahHeap::heap()->connection_matrix())
{
}

class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  bool _process_refs;
public:
  ShenandoahInitMarkRootsTask(ShenandoahRootProcessor* rp, bool process_refs) :
    AbstractGangTask("Shenandoah init mark roots task"),
    _rp(rp),
    _process_refs(process_refs) {
  }

  void work(uint worker_id) {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    SCMObjToScanQueueSet* queues = heap->concurrentMark()->task_queues();
    assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %u", worker_id);

    SCMObjToScanQueue* q = queues->queue(worker_id);
    ShenandoahInitMarkRootsClosure mark_cl(q);
    CLDToOopClosure cldCl(&mark_cl);
    MarkingCodeBlobClosure blobsCl(&mark_cl, ! CodeBlobToOopClosure::FixRelocations);

    // The rationale for selecting the roots to scan is as follows:
    //   a. With unload_classes = true, we only want to scan the actual strong roots from the
    //      code cache. This allows us to identify the dead classes, unload them, *and*
    //      invalidate the relevant code cache blobs. This can only be done together with
    //      class unloading.
    //   b. With unload_classes = false, we have to nominally retain all the references from the
    //      code cache, because there may be embedded classes/oops in the generated code,
    //      which we would otherwise never visit during mark. Without code cache invalidation,
    //      as in (a), we risk executing such a code cache blob, and crashing.
    //   c. With ShenandoahConcurrentCodeRoots, we avoid scanning the entire code cache here,
    //      and instead do that in a concurrent phase under the relevant lock. This saves
    //      init mark pause time.

    ResourceMark m;
    if (heap->concurrentMark()->unload_classes()) {
      _rp->process_strong_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, &blobsCl, worker_id);
    } else {
      if (ShenandoahConcurrentCodeRoots) {
        CodeBlobClosure* code_blobs;
#ifdef ASSERT
        AssertToSpaceClosure assert_to_space_oops;
        CodeBlobToOopClosure assert_to_space(&assert_to_space_oops,
                                             !CodeBlobToOopClosure::FixRelocations);
        code_blobs = &assert_to_space;
#else
        code_blobs = NULL;
#endif
        _rp->process_all_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, code_blobs, worker_id);
      } else {
        _rp->process_all_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, &blobsCl, worker_id);
      }
    }
  }
};

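// Updates all root slots to point to the to-space copies of their referents,
// using SCMUpdateRefsClosure. Run at a safepoint after evacuation, when roots
// may still reference from-space objects.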
class ShenandoahUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
public:
  ShenandoahUpdateRootsTask(ShenandoahRootProcessor* rp) :
    AbstractGangTask("Shenandoah update roots task"),
    _rp(rp) {
  }

  void work(uint worker_id) {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    SCMUpdateRefsClosure cl;
    CLDToOopClosure cldCl(&cl);

    CodeBlobClosure* code_blobs;
#ifdef ASSERT
    AssertToSpaceClosure assert_to_space_oops;
    CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
    code_blobs = &assert_to_space;
#else
    code_blobs = NULL;
#endif
    _rp->process_all_roots(&cl, &cl, &cldCl, code_blobs, worker_id);
  }
};

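// Concurrent marking task: each worker drains its own queue and steals from
// other queues until termination. When ShenandoahConcurrentCodeRoots is
// enabled, the worker that claims the code cache also scans its roots here,
// under the CodeCache_lock, instead of during the init mark pause.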
class SCMConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ParallelTaskTerminator* _terminator;
  bool _update_refs;

public:
  SCMConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs) :
    AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator), _update_refs(update_refs) {
  }

  void work(uint worker_id) {
    SCMObjToScanQueue* q = _cm->get_queue(worker_id);
    jushort* live_data = _cm->get_liveness(worker_id);
    ReferenceProcessor* rp;
    if (_cm->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
    } else {
      rp = NULL;
    }
    if (ShenandoahConcurrentCodeRoots && _cm->claim_codecache()) {
      if (! _cm->unload_classes()) {
        ShenandoahMarkResolveRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, ! CodeBlobToOopClosure::FixRelocations);
        MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        CodeCache::blobs_do(&blobs);
      }
    }

    _cm->mark_loop(worker_id, _terminator, rp,
                   true, // cancellable
                   true, // drain SATBs as we go
                   true, // count liveness
                   _cm->unload_classes(),
                   _update_refs,
                   UseShenandoahMatrix);
  }
};

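// Final marking task, run at a safepoint: drains the remaining SATB buffers,
// then finishes the marking work left over from the concurrent phase.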
class SCMFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ParallelTaskTerminator* _terminator;
  bool _update_refs;
  bool _count_live;
  bool _unload_classes;

public:
  SCMFinalMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs, bool count_live, bool unload_classes) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _update_refs(update_refs), _count_live(count_live), _unload_classes(unload_classes) {
  }

  void work(uint worker_id) {
    // First drain the remaining SATB buffers.
    // Note that this is not strictly necessary for mark-compact, but since it
    // requires a StrongRootsScope around the task, we need to claim the threads
    // anyway, and performance-wise it does not really matter. It adds about 1ms
    // to a full GC.
    _cm->drain_satb_buffers(worker_id, true);

    ReferenceProcessor* rp;
    if (_cm->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
    } else {
      rp = NULL;
    }

    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   false, // do not drain SATBs, already drained
                   _count_live,
                   _unload_classes,
                   _update_refs,
                   UseShenandoahMatrix);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};

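// Scans all roots at a safepoint and seeds the task queues for the
// concurrent marking phase.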
void ShenandoahConcurrentMark::mark_roots() {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ClassLoaderDataGraph::clear_claimed_marks();
  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();

  assert(nworkers <= task_queues()->size(), "Just check");

  ShenandoahRootProcessor root_proc(heap, nworkers, ShenandoahCollectorPolicy::scan_thread_roots);
  TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
  task_queues()->reserve(nworkers);

  ShenandoahInitMarkRootsTask mark_roots(&root_proc, process_references());
  workers->run_task(&mark_roots);
  if (ShenandoahConcurrentCodeRoots) {
    clear_claim_codecache();
  }
}

void ShenandoahConcurrentMark::init_mark_roots() {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Set up ref processing and class unloading.
  ShenandoahCollectorPolicy* policy = heap->shenandoahPolicy();
  set_process_references(policy->process_references());
  set_unload_classes(policy->unload_classes());

  mark_roots();
}

void ShenandoahConcurrentMark::update_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ClassLoaderDataGraph::clear_claimed_marks();
  uint nworkers = heap->workers()->active_workers();

  ShenandoahRootProcessor root_proc(heap, nworkers, ShenandoahCollectorPolicy::update_thread_roots);
  ShenandoahUpdateRootsTask update_roots(&root_proc);
  heap->workers()->run_task(&update_roots);
}

void ShenandoahConcurrentMark::final_update_roots() {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

  COMPILER2_PRESENT(DerivedPointerTable::clear());

  update_roots();

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}

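// Sets up one mark queue per worker (at least one), plus a per-worker
// liveness accumulator sized to the maximum number of heap regions.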
void ShenandoahConcurrentMark::initialize(uint workers) {
  _heap = ShenandoahHeap::heap();

  uint num_queues = MAX2(workers, 1U);

  _task_queues = new SCMObjToScanQueueSet((int) num_queues);

  for (uint i = 0; i < num_queues; ++i) {
    SCMObjToScanQueue* task_queue = new SCMObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }
  _process_references = false;
  _unload_classes = false;
  _claimed_codecache = 0;

  JavaThread::satb_mark_queue_set().set_buffer_size(ShenandoahSATBBufferSize);

  size_t max_regions = ShenandoahHeap::heap()->max_regions();
  _liveness_local = NEW_C_HEAP_ARRAY(jushort*, workers, mtGC);
  for (uint worker = 0; worker < workers; worker++) {
    _liveness_local[worker] = NEW_C_HEAP_ARRAY(jushort, max_regions, mtGC);
  }
}

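// Entry point for the concurrent marking phase: sets up reference discovery,
// then runs the marking task with either the Shenandoah OWST terminator or
// the default parallel terminator.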
void ShenandoahConcurrentMark::mark_from_roots() {
  ShenandoahHeap* sh = (ShenandoahHeap *) Universe::heap();
  WorkGang* workers = sh->workers();
  uint nworkers = workers->active_workers();

  bool update_refs = sh->need_update_refs();

  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::conc_mark);

  if (process_references()) {
    ReferenceProcessor* rp = sh->ref_processor();
    rp->set_active_mt_degree(nworkers);

    // enable ("weak") refs discovery
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(sh->is_full_gc_in_progress()); // snapshot the soft ref policy to be used in this cycle
  }

  task_queues()->reserve(nworkers);

  if (UseShenandoahOWST) {
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    SCMConcurrentMarkingTask markingTask = SCMConcurrentMarkingTask(this, &terminator, update_refs);
    workers->run_task(&markingTask);
  } else {
    ParallelTaskTerminator terminator(nworkers, task_queues());
    SCMConcurrentMarkingTask markingTask = SCMConcurrentMarkingTask(this, &terminator, update_refs);
    workers->run_task(&markingTask);
  }

  assert(task_queues()->is_empty() || sh->cancelled_concgc(), "Should be empty when not cancelled");
  if (! sh->cancelled_concgc()) {
    TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
  }

  TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());

  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::conc_mark);
}

void ShenandoahConcurrentMark::finish_mark_from_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

  IsGCActiveMark is_active;

  ShenandoahHeap* sh = (ShenandoahHeap *) Universe::heap();

  TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());

  shared_finish_mark_from_roots(/* full_gc = */ false);

  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::update_roots);
  if (sh->need_update_refs()) {
    final_update_roots();
  }
  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::update_roots);

  TASKQUEUE_STATS_ONLY(print_taskqueue_stats());

#ifdef ASSERT
  verify_roots();

  if (ShenandoahDumpHeapAfterConcurrentMark) {
    sh->ensure_parsability(false);
    sh->print_all_refs("post-mark");
  }
#endif
}

class ResetRecentlyAllocated : public ShenandoahHeapRegionClosure {
public:
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->set_recently_allocated(false);
    return false;
  }
};

void ShenandoahConcurrentMark::shared_finish_mark_from_roots(bool full_gc) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* sh = ShenandoahHeap::heap();
  ShenandoahCollectorPolicy* policy = sh->shenandoahPolicy();

  uint nworkers = sh->workers()->active_workers();

  // Finally mark everything else we've got in our queues during the previous steps.
  // This does two different things for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts out with the task queues seeded by the initial
  //   root scan, and completes the closure, thus marking through all live objects.
  // The implementation is the same, so it's shared here.
  {
    policy->record_phase_start(full_gc ?
                               ShenandoahCollectorPolicy::full_gc_mark_drain_queues :
                               ShenandoahCollectorPolicy::drain_satb);
    bool count_live = !(ShenandoahNoLivenessFullGC && full_gc); // we do not need liveness data for full GC
    task_queues()->reserve(nworkers);

    StrongRootsScope scope(nworkers);
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, task_queues());
      SCMFinalMarkingTask task(this, &terminator, sh->need_update_refs(), count_live, unload_classes());
      sh->workers()->run_task(&task);
    } else {
      ParallelTaskTerminator terminator(nworkers, task_queues());
      SCMFinalMarkingTask task(this, &terminator, sh->need_update_refs(), count_live, unload_classes());
      sh->workers()->run_task(&task);
    }
    policy->record_phase_end(full_gc ?
                             ShenandoahCollectorPolicy::full_gc_mark_drain_queues :
                             ShenandoahCollectorPolicy::drain_satb);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // When we're done marking everything, we process weak references.
  policy->record_phase_start(full_gc ?
                             ShenandoahCollectorPolicy::full_gc_mark_weakrefs :
                             ShenandoahCollectorPolicy::weakrefs);
  if (process_references()) {
    weak_refs_work();
  }
  policy->record_phase_end(full_gc ?
                           ShenandoahCollectorPolicy::full_gc_mark_weakrefs :
                           ShenandoahCollectorPolicy::weakrefs);

  // And finally finish class unloading
  policy->record_phase_start(full_gc ?
                             ShenandoahCollectorPolicy::full_gc_mark_class_unloading :
                             ShenandoahCollectorPolicy::class_unloading);
  if (unload_classes()) {
    ShenandoahForwardedIsAliveClosure is_alive;
    // Unload classes and purge SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(&is_alive, false);
    ParallelCleaningTask unlink_task(&is_alive, true, true, nworkers, purged_class);
    sh->workers()->run_task(&unlink_task);
    ClassLoaderDataGraph::purge();
  }

  // Mark finished. All recently allocated regions are not recent anymore.
  {
    ResetRecentlyAllocated cl;
    sh->heap_region_iterate(&cl);
  }

  policy->record_phase_end(full_gc ?
                           ShenandoahCollectorPolicy::full_gc_mark_class_unloading :
                           ShenandoahCollectorPolicy::class_unloading);

  assert(task_queues()->is_empty(), "Should be empty");
}

#ifdef ASSERT
template <class T>
void ShenandoahVerifyRootsClosure1::do_oop_work(T* p) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    if (! oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj))) {
      tty->print_cr("from-space marked: %s, to-space marked: %s, unload_classes: %s",
                    BOOL_TO_STR(heap->is_marked_next(obj)),
                    BOOL_TO_STR(heap->is_marked_next(ShenandoahBarrierSet::resolve_oop_static_not_null(obj))),
                    BOOL_TO_STR(heap->concurrentMark()->unload_classes()));
    }
    guarantee(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "oop must not be forwarded");
    guarantee(heap->is_marked_next(obj), "oop must be marked");
  }
}

void ShenandoahVerifyRootsClosure1::do_oop(oop* p) {
  do_oop_work(p);
}

void ShenandoahVerifyRootsClosure1::do_oop(narrowOop* p) {
  do_oop_work(p);
}

void ShenandoahConcurrentMark::verify_roots() {
  ShenandoahVerifyRootsClosure1 cl;
  CodeBlobToOopClosure blobsCl(&cl, false);
  CLDToOopClosure cldCl(&cl);
  ClassLoaderDataGraph::clear_claimed_marks();
  ShenandoahRootProcessor rp(ShenandoahHeap::heap(), 1);
  rp.process_all_roots(&cl, &cl, &cldCl, &blobsCl, 0);
}
#endif

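// Claims each thread once per marking phase (via the thread claim parity)
// and applies the given SATB buffer closure to the thread's pending SATB
// queue entries.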
class ShenandoahSATBThreadsClosure : public ThreadClosure {
  ShenandoahSATBBufferClosure* _satb_cl;
  int _thread_parity;

 public:
  ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
    _satb_cl(satb_cl),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;
        jt->satb_mark_queue().apply_closure_and_empty(_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
      }
    }
  }
};

void ShenandoahConcurrentMark::drain_satb_buffers(uint worker_id, bool remark) {
  SCMObjToScanQueue* q = get_queue(worker_id);
  ShenandoahSATBBufferClosure cl(q);

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  while (satb_mq_set.apply_closure_to_completed_buffer(&cl));

  if (remark) {
    ShenandoahSATBThreadsClosure tc(&cl);
    Threads::threads_do(&tc);
  }
}

#if TASKQUEUE_STATS
void ShenandoahConcurrentMark::print_taskqueue_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ShenandoahConcurrentMark::print_taskqueue_stats() const {
  if (!log_develop_is_enabled(Trace, gc, task, stats)) {
    return;
  }
  Log(gc, task, stats) log;
  ResourceMark rm;
  outputStream* st = log.trace_stream();
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  const int n = _task_queues->size();
  for (int i = 0; i < n; ++i) {
    st->print(INT32_FORMAT_W(3), i);
    _task_queues->queue(i)->stats.print(st);
    st->cr();
    totals += _task_queues->queue(i)->stats;
  }
  st->print("tot "); totals.print(st); st->cr();
  DEBUG_ONLY(totals.verify());
}

void ShenandoahConcurrentMark::reset_taskqueue_stats() {
  const int n = task_queues()->size();
  for (int i = 0; i < n; ++i) {
    task_queues()->queue(i)->stats.reset();
  }
}
#endif // TASKQUEUE_STATS

// Weak Reference Closures
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ParallelTaskTerminator* _terminator;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id, ParallelTaskTerminator* t):
    _worker_id(worker_id),
    _terminator(t) {
  }

  void do_void() {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrentMark();
    ReferenceProcessor* rp;
    if (scm->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
    } else {
      rp = NULL;
    }

    scm->mark_loop(_worker_id, _terminator, rp,
                   false, // not cancellable
                   false, // do not drain SATBs
                   true,  // count liveness
                   scm->unload_classes(),
                   sh->need_update_refs(),
                   UseShenandoahMatrix);
  }
};

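// Keep-alive closures used during reference processing. The NONE variant
// leaves the visited slot untouched, while SIMPLE also updates the slot to
// point to the to-space copy. The Matrix variants additionally record the
// region connection in the connection matrix.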
class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
  SCMObjToScanQueue* _queue;
  ShenandoahHeap* _heap;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE, false>(p, _heap, _queue, NULL);
  }

public:
  ShenandoahCMKeepAliveClosure(SCMObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
  SCMObjToScanQueue* _queue;
  ShenandoahHeap* _heap;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, false>(p, _heap, _queue, NULL);
  }

public:
  ShenandoahCMKeepAliveUpdateClosure(SCMObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

class ShenandoahCMKeepAliveMatrixClosure : public OopClosure {
private:
  SCMObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahConnectionMatrix* _conn_matrix;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE, true>(p, _heap, _queue, _conn_matrix);
  }

public:
  ShenandoahCMKeepAliveMatrixClosure(SCMObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap()),
    _conn_matrix(ShenandoahHeap::heap()->connection_matrix()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

class ShenandoahCMKeepAliveUpdateMatrixClosure : public OopClosure {
private:
  SCMObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahConnectionMatrix* _conn_matrix;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, true>(p, _heap, _queue, _conn_matrix);
  }

public:
  ShenandoahCMKeepAliveUpdateMatrixClosure(SCMObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap()),
    _conn_matrix(ShenandoahHeap::heap()->connection_matrix()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

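// Adapts the reference processor's ProcessTask to a gang task, selecting the
// keep-alive closure that matches the current matrix / update-refs mode.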
class ShenandoahRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ParallelTaskTerminator* _terminator;

public:
  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             ParallelTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahForwardedIsAliveClosure is_alive;
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    if (UseShenandoahMatrix) {
      if (heap->need_update_refs()) {
        ShenandoahCMKeepAliveUpdateMatrixClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
        _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
      } else {
        ShenandoahCMKeepAliveMatrixClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
        _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
      }
    } else {
      if (heap->need_update_refs()) {
        ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
        _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
      } else {
        ShenandoahCMKeepAliveClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
        _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
      }
    }
  }
};

class ShenandoahRefEnqueueTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::EnqueueTask& _enqueue_task;

public:
  ShenandoahRefEnqueueTaskProxy(AbstractRefProcTaskExecutor::EnqueueTask& enqueue_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enqueue_task(enqueue_task) {
  }

  void work(uint worker_id) {
    _enqueue_task.work(worker_id);
  }
};

class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahRefProcTaskExecutor(WorkGang* workers) :
    _workers(workers) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task) {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* cm = heap->concurrentMark();
    uint nworkers = _workers->active_workers();
    cm->task_queues()->reserve(nworkers);
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, cm->task_queues());
      ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
      _workers->run_task(&proc_task_proxy);
    } else {
      ParallelTaskTerminator terminator(nworkers, cm->task_queues());
      ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
      _workers->run_task(&proc_task_proxy);
    }
  }

  void execute(EnqueueTask& task) {
    ShenandoahRefEnqueueTaskProxy enqueue_task_proxy(task);
    _workers->run_task(&enqueue_task_proxy);
  }
};

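// Processes discovered weak references after marking completes: keeps
// referents alive through the appropriate keep-alive closure, drains the
// marking stacks to completion, and finally enqueues the cleared references.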
void ShenandoahConcurrentMark::weak_refs_work() {
  assert(process_references(), "sanity");
  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
  ReferenceProcessor* rp = sh->ref_processor();
  WorkGang* workers = sh->workers();
  uint nworkers = workers->active_workers();

  // Setup collector policy for softref cleaning.
  bool clear_soft_refs = sh->collector_policy()->use_should_clear_all_soft_refs(true /* bogus arg*/);
  log_develop_debug(gc, ref)("clearing soft refs: %s", BOOL_TO_STR(clear_soft_refs));
  rp->setup_policy(clear_soft_refs);
  rp->set_active_mt_degree(nworkers);

  uint serial_worker_id = 0;
  ShenandoahForwardedIsAliveClosure is_alive;

  assert(task_queues()->is_empty(), "Should be empty");

  ParallelTaskTerminator terminator(1, task_queues());
  ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator);
  ShenandoahRefProcTaskExecutor executor(workers);

  log_develop_trace(gc, ref)("start processing references");

  if (sh->need_update_refs()) {
    ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      NULL);
  } else {
    ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      NULL);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  log_develop_trace(gc, ref)("finished processing references");
  log_develop_trace(gc, ref)("start enqueuing references");

  rp->enqueue_discovered_references(&executor);

  log_develop_trace(gc, ref)("finished enqueuing references");

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}

void ShenandoahConcurrentMark::cancel() {
  ShenandoahHeap* sh = ShenandoahHeap::heap();

  // Cancel weak-ref discovery.
  if (process_references()) {
    ReferenceProcessor* rp = sh->ref_processor();
    rp->abandon_partial_discovery();
    rp->disable_discovery();
  }

  // Clean up marking stacks.
  SCMObjToScanQueueSet* queues = task_queues();
  queues->clear();

  // Cancel SATB buffers.
  JavaThread::satb_mark_queue_set().abandon_partial_marking();
}

SCMObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %u", worker_id);
  return _task_queues->queue(worker_id);
}

void ShenandoahConcurrentMark::clear_queue(SCMObjToScanQueue *q) {
  q->set_empty();
  q->overflow_stack()->clear();
  q->clear_buffer();
}

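// Translates the run-time flags into template parameters, so that the hot
// marking loop below is compiled into a specialized version for each flag
// combination, with the per-flag branches resolved at compile time.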
template <bool CANCELLABLE, bool DRAIN_SATB, bool COUNT_LIVENESS, bool CLASS_UNLOAD, bool UPDATE_REFS, bool UPDATE_MATRIX>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, ParallelTaskTerminator *t, ReferenceProcessor *rp) {
  SCMObjToScanQueue* q = get_queue(w);

  jushort* ld;
  if (COUNT_LIVENESS) {
    ld = get_liveness(w);
    Copy::fill_to_bytes(ld, _heap->max_regions() * sizeof(jushort));
  } else {
    ld = NULL;
  }

  // TODO: We can clean this up if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (UPDATE_MATRIX) {
    if (CLASS_UNLOAD) {
      if (UPDATE_REFS) {
        ShenandoahMarkUpdateRefsMetadataMatrixClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataMatrixClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataMatrixClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataMatrixClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      }
    } else {
      if (UPDATE_REFS) {
        ShenandoahMarkUpdateRefsMatrixClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMatrixClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMatrixClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMatrixClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      }
    }
  } else {
    if (CLASS_UNLOAD) {
      if (UPDATE_REFS) {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      }
    } else {
      if (UPDATE_REFS) {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      }
    }
  }
  if (COUNT_LIVENESS) {
    for (uint i = 0; i < _heap->max_regions(); i++) {
      ShenandoahHeapRegion* r = _heap->regions()->get(i);
      if (r != NULL) {
        jushort live = ld[i];
        if (live > 0) {
          r->increase_live_data_words(live);
        }
      }
    }
  }
}

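// The marking loop proper. It first claims and drains any extra queues
// (there can be more queues than workers), then processes the worker's own
// queue, SATB buffers, and stolen tasks until termination is offered.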
template <class T, bool CANCELLABLE, bool DRAIN_SATB, bool COUNT_LIVENESS>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator *terminator) {
  int seed = 17;
  uint stride = CANCELLABLE ? ShenandoahMarkLoopStride : 1;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  SCMObjToScanQueueSet* queues = task_queues();
  SCMObjToScanQueue* q;
  SCMTask t;

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
    "Need to reserve proper number of queues");

  q = queues->claim_next();
  while (q != NULL) {
    if (CANCELLABLE && heap->cancelled_concgc()) {
      ShenandoahCancelledTerminatorTerminator tt;
      while (!terminator->offer_termination(&tt));
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (try_queue(q, t)) {
        do_task<T, COUNT_LIVENESS>(q, cl, live_data, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  q = get_queue(worker_id);

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->cancelled_concgc()) {
      ShenandoahCancelledTerminatorTerminator tt;
      while (!terminator->offer_termination(&tt));
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (try_queue(q, t) ||
              (DRAIN_SATB && try_draining_satb_buffer(q, t)) ||
              queues->steal(worker_id, &seed, t)) {
        do_task<T, COUNT_LIVENESS>(q, cl, live_data, &t);
      } else {
        if (terminator->offer_termination()) return;
      }
    }
  }
}

void ShenandoahConcurrentMark::set_process_references(bool pr) {
  _process_references = pr;
}

bool ShenandoahConcurrentMark::process_references() const {
  return _process_references;
}

void ShenandoahConcurrentMark::set_unload_classes(bool uc) {
  _unload_classes = uc;
}

bool ShenandoahConcurrentMark::unload_classes() const {
  return _unload_classes;
}

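// Claims the code cache for scanning at most once per marking cycle, by
// CAS-ing the claim flag from 0 to 1. The flag is reset after the init mark
// root scan.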
bool ShenandoahConcurrentMark::claim_codecache() {
  assert(ShenandoahConcurrentCodeRoots, "must not be called otherwise");
  jbyte old = Atomic::cmpxchg(1, &_claimed_codecache, 0);
  return old == 0;
}

void ShenandoahConcurrentMark::clear_claim_codecache() {
  assert(ShenandoahConcurrentCodeRoots, "must not be called otherwise");
  _claimed_codecache = 0;
}

jushort* ShenandoahConcurrentMark::get_liveness(uint worker_id) {
  return _liveness_local[worker_id];
}

// Generate Shenandoah specialized oop_oop_iterate functions.
SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_SHENANDOAH(ALL_KLASS_OOP_OOP_ITERATE_DEFN)