/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/parallelCleaning.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.hpp"
#include "gc/shenandoah/shenandoah_specialized_oop_closures.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/oop.inline.hpp"

#ifdef ASSERT
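// Debug-only closure: asserts that every oop it visits already points into
// to-space, i.e. resolves to itself through the forwarding (Brooks) pointer.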
class AssertToSpaceClosure : public OopClosure {
private:
  template <class T>
  inline void do_oop_nv(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "need to-space object here");
    }
  }
public:
  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};
#endif

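// Init-mark root closure: marks objects reachable from roots via the RESOLVE
// variant of mark_through_ref, which resolves forwarded oops before marking.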
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
  SCMObjToScanQueue* _queue;
  ShenandoahHeap* _heap;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, RESOLVE>(p, _heap, _queue);
  }

public:
  ShenandoahInitMarkRootsClosure(SCMObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

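// Updates a single heap reference via ShenandoahHeap::update_oop_ref_not_null;
// NULL references are left untouched.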
class SCMUpdateRefsClosure: public OopClosure {
private:
  ShenandoahHeap* _heap;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      _heap->update_oop_ref_not_null(p, obj);
    }
  }

public:
  SCMUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {
  }

  inline void do_oop(oop* p)       { do_oop_work(p); }
  inline void do_oop(narrowOop* p) { do_oop_work(p); }
};

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(SCMObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataAwareOopClosure(rp),
  _queue(q),
  _heap((ShenandoahHeap*) Universe::heap())
{
}

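// Root scanning task for the init-mark pause. Each worker claims its reserved
// queue and marks everything reachable from its share of the roots.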
class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  bool _process_refs;
public:
  ShenandoahInitMarkRootsTask(ShenandoahRootProcessor* rp, bool process_refs) :
    AbstractGangTask("Shenandoah init mark roots task"),
    _rp(rp),
    _process_refs(process_refs) {
  }

  void work(uint worker_id) {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    SCMObjToScanQueueSet* queues = heap->concurrentMark()->task_queues();
    assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %d", worker_id);

    SCMObjToScanQueue* q = queues->queue(worker_id);
    ShenandoahInitMarkRootsClosure mark_cl(q);
    CLDToOopClosure cldCl(&mark_cl);
    MarkingCodeBlobClosure blobsCl(&mark_cl, ! CodeBlobToOopClosure::FixRelocations);

    // The rationale for selecting the roots to scan is as follows:
    //   a. With unload_classes = true, we only want to scan the actual strong roots from the
    //      code cache. This allows us to identify the dead classes, unload them, *and*
    //      invalidate the relevant code cache blobs. This can only be done together with
    //      class unloading.
    //   b. With unload_classes = false, we have to nominally retain all the references from the
    //      code cache, because there may be embedded classes/oops in the generated code
    //      that we would otherwise never visit during mark. Without the code cache invalidation
    //      of (a), we would risk executing such a code cache blob, and crashing.
    //   c. With ShenandoahConcurrentCodeRoots, we avoid scanning the entire code cache here,
    //      and instead do that in the concurrent phase under the relevant lock. This saves
    //      init mark pause time.

    ResourceMark m;
    if (heap->concurrentMark()->unload_classes()) {
      _rp->process_strong_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, &blobsCl, worker_id);
    } else {
      if (ShenandoahConcurrentCodeRoots) {
        CodeBlobClosure* code_blobs;
#ifdef ASSERT
        AssertToSpaceClosure assert_to_space_oops;
        CodeBlobToOopClosure assert_to_space(&assert_to_space_oops,
                                             !CodeBlobToOopClosure::FixRelocations);
        code_blobs = &assert_to_space;
#else
        code_blobs = NULL;
#endif
        _rp->process_all_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, code_blobs, worker_id);
      } else {
        _rp->process_all_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, &blobsCl, worker_id);
      }
    }
  }
};

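// Root updating task: walks all roots with SCMUpdateRefsClosure, updating
// each root reference in place.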
class ShenandoahUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
public:
  ShenandoahUpdateRootsTask(ShenandoahRootProcessor* rp) :
    AbstractGangTask("Shenandoah update roots task"),
    _rp(rp) {
  }

  void work(uint worker_id) {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    SCMUpdateRefsClosure cl;
    CLDToOopClosure cldCl(&cl);

    CodeBlobClosure* code_blobs;
#ifdef ASSERT
    AssertToSpaceClosure assert_to_space_oops;
    CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
    code_blobs = &assert_to_space;
#else
    code_blobs = NULL;
#endif
    _rp->process_all_roots(&cl, &cl, &cldCl, code_blobs, worker_id);
  }
};

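// Concurrent marking task. Optionally scans code cache roots first (when
// ShenandoahConcurrentCodeRoots is on and this worker wins the claim), then
// runs the cancellable marking loop, draining SATB buffers as it goes.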
class SCMConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ParallelTaskTerminator* _terminator;
  bool _update_refs;

public:
  SCMConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs) :
    AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator), _update_refs(update_refs) {
  }

  void work(uint worker_id) {
    SCMObjToScanQueue* q = _cm->get_queue(worker_id);
    jushort* live_data = _cm->get_liveness(worker_id);
    ReferenceProcessor* rp;
    if (_cm->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
    } else {
      rp = NULL;
    }
    if (ShenandoahConcurrentCodeRoots && _cm->claim_codecache()) {
      if (! _cm->unload_classes()) {
        ShenandoahMarkRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, ! CodeBlobToOopClosure::FixRelocations);
        MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        CodeCache::blobs_do(&blobs);
      }
    }

    _cm->mark_loop(worker_id, _terminator, rp,
                   true, // cancellable
                   true, // drain SATBs as we go
                   true, // count liveness
                   _cm->unload_classes(),
                   _update_refs);
  }
};

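// Final marking task, run at the safepoint: drains the remaining SATB buffers,
// then runs the (non-cancellable) marking loop until all queues are empty.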
class SCMFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ParallelTaskTerminator* _terminator;
  bool _update_refs;
  bool _count_live;
  bool _unload_classes;

public:
  SCMFinalMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs, bool count_live, bool unload_classes) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _update_refs(update_refs), _count_live(count_live), _unload_classes(unload_classes) {
  }

  void work(uint worker_id) {
    // First drain the remaining SATB buffers.
    // Note that this is not strictly necessary for mark-compact, but since it
    // requires a StrongRootsScope around the task, we need to claim the
    // threads anyway, and performance-wise it does not really matter. It adds
    // about 1 ms to a full GC.
    _cm->drain_satb_buffers(worker_id, true);

    ReferenceProcessor* rp;
    if (_cm->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
    } else {
      rp = NULL;
    }

    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   false, // do not drain SATBs, already drained
                   _count_live,
                   _unload_classes,
                   _update_refs);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};

void ShenandoahConcurrentMark::mark_roots() {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ClassLoaderDataGraph::clear_claimed_marks();
  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();

  assert(nworkers <= task_queues()->size(), "Cannot use more workers than there are task queues");

  ShenandoahRootProcessor root_proc(heap, nworkers, ShenandoahCollectorPolicy::scan_thread_roots);
  TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
  task_queues()->reserve(nworkers);

  ShenandoahInitMarkRootsTask mark_roots(&root_proc, process_references());
  workers->run_task(&mark_roots);
  if (ShenandoahConcurrentCodeRoots) {
    clear_claim_codecache();
  }
}

void ShenandoahConcurrentMark::init_mark_roots() {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Set up ref processing and class unloading.
  ShenandoahCollectorPolicy* policy = heap->shenandoahPolicy();
  set_process_references(policy->process_references());
  set_unload_classes(policy->unload_classes());

  // Set up parallel workers for initial marking.
  WorkGang* workers = heap->workers();
  uint nworkers = ShenandoahCollectorPolicy::calc_workers_for_init_marking(
    workers->total_workers(), workers->active_workers(),
    Threads::number_of_non_daemon_threads());

  workers->update_active_workers(nworkers);

  mark_roots();
}

void ShenandoahConcurrentMark::update_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ClassLoaderDataGraph::clear_claimed_marks();
  uint nworkers = heap->workers()->active_workers();

  ShenandoahRootProcessor root_proc(heap, nworkers, ShenandoahCollectorPolicy::update_thread_roots);
  ShenandoahUpdateRootsTask update_roots(&root_proc);
  heap->workers()->run_task(&update_roots);
}

void ShenandoahConcurrentMark::final_update_roots() {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

  COMPILER2_PRESENT(DerivedPointerTable::clear());

  update_roots();

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}

void ShenandoahConcurrentMark::initialize(uint workers) {
  _heap = ShenandoahHeap::heap();

  uint num_queues = MAX2(workers, 1U);

  _task_queues = new SCMObjToScanQueueSet((int) num_queues);

  for (uint i = 0; i < num_queues; ++i) {
    SCMObjToScanQueue* task_queue = new SCMObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }
  _process_references = false;
  _unload_classes = false;
  _claimed_codecache = 0;

  JavaThread::satb_mark_queue_set().set_buffer_size(ShenandoahSATBBufferSize);

  size_t max_regions = ShenandoahHeap::heap()->max_regions();
  _liveness_local = NEW_C_HEAP_ARRAY(jushort*, workers, mtGC);
  for (uint worker = 0; worker < workers; worker++) {
    _liveness_local[worker] = NEW_C_HEAP_ARRAY(jushort, max_regions, mtGC);
  }
}

void ShenandoahConcurrentMark::mark_from_roots() {
  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();

  bool update_refs = sh->need_update_refs();

  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::conc_mark);

  // Concurrent marking uses the concurrent worker gang. Set up those workers.
  WorkGang* workers = sh->conc_workers();
  uint nworkers = ShenandoahCollectorPolicy::calc_workers_for_conc_marking(
    workers->total_workers(), workers->active_workers(),
    Threads::number_of_non_daemon_threads());

  workers->update_active_workers(nworkers);

  if (process_references()) {
    ReferenceProcessor* rp = sh->ref_processor();
    rp->set_active_mt_degree(nworkers);

    // Enable ("weak") refs discovery.
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(sh->is_full_gc_in_progress()); // snapshot the soft ref policy to be used in this cycle
  }

  task_queues()->reserve(nworkers);

  if (UseShenandoahOWST) {
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    SCMConcurrentMarkingTask markingTask = SCMConcurrentMarkingTask(this, &terminator, update_refs);
    workers->run_task(&markingTask);
  } else {
    ParallelTaskTerminator terminator(nworkers, task_queues());
    SCMConcurrentMarkingTask markingTask = SCMConcurrentMarkingTask(this, &terminator, update_refs);
    workers->run_task(&markingTask);
  }

  assert(task_queues()->is_empty() || sh->cancelled_concgc(), "Should be empty when not cancelled");
  if (! sh->cancelled_concgc()) {
    TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
  }

  TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());

  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::conc_mark);
}

void ShenandoahConcurrentMark::finish_mark_from_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

  IsGCActiveMark is_active;

  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();

  // Set up workers for final marking.
  WorkGang* workers = sh->workers();
  uint nworkers = ShenandoahCollectorPolicy::calc_workers_for_final_marking(
    workers->total_workers(), workers->active_workers(), Threads::number_of_non_daemon_threads());
  workers->update_active_workers(nworkers);

  TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());

  shared_finish_mark_from_roots(/* full_gc = */ false);

  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::update_roots);
  if (sh->need_update_refs()) {
    final_update_roots();
  }
  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::update_roots);

  TASKQUEUE_STATS_ONLY(print_taskqueue_stats());

#ifdef ASSERT
  verify_roots();

  if (ShenandoahDumpHeapAfterConcurrentMark) {
    sh->ensure_parsability(false);
    sh->print_all_refs("post-mark");
  }
#endif
}

class ResetRecentlyAllocated : public ShenandoahHeapRegionClosure {
public:
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->set_recently_allocated(false);
    return false;
  }
};

void ShenandoahConcurrentMark::shared_finish_mark_from_roots(bool full_gc) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* sh = ShenandoahHeap::heap();
  ShenandoahCollectorPolicy* policy = sh->shenandoahPolicy();

  uint nworkers = sh->workers()->active_workers();

  // Finish marking everything left in the queues from the previous steps.
  // This does two different things for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts out with the task queues seeded by the
  //   initial root scan, and completes the closure, thus marking through all
  //   live objects. The implementation is the same, so it is shared here.
  {
    policy->record_phase_start(full_gc ?
                               ShenandoahCollectorPolicy::full_gc_mark_drain_queues :
                               ShenandoahCollectorPolicy::drain_satb);
    bool count_live = !(ShenandoahNoLivenessFullGC && full_gc); // we do not need liveness data for full GC
    task_queues()->reserve(nworkers);

    StrongRootsScope scope(nworkers);
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, task_queues());
      SCMFinalMarkingTask task(this, &terminator, sh->need_update_refs(), count_live, unload_classes());
      sh->workers()->run_task(&task);
    } else {
      ParallelTaskTerminator terminator(nworkers, task_queues());
      SCMFinalMarkingTask task(this, &terminator, sh->need_update_refs(), count_live, unload_classes());
      sh->workers()->run_task(&task);
    }
    policy->record_phase_end(full_gc ?
                             ShenandoahCollectorPolicy::full_gc_mark_drain_queues :
                             ShenandoahCollectorPolicy::drain_satb);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // When we're done marking everything, we process weak references.
  policy->record_phase_start(full_gc ?
                             ShenandoahCollectorPolicy::full_gc_mark_weakrefs :
                             ShenandoahCollectorPolicy::weakrefs);
  if (process_references()) {
    weak_refs_work();
  }
  policy->record_phase_end(full_gc ?
                           ShenandoahCollectorPolicy::full_gc_mark_weakrefs :
                           ShenandoahCollectorPolicy::weakrefs);

  // And finally finish class unloading.
  policy->record_phase_start(full_gc ?
                             ShenandoahCollectorPolicy::full_gc_mark_class_unloading :
                             ShenandoahCollectorPolicy::class_unloading);
  if (unload_classes()) {
    ShenandoahForwardedIsAliveClosure is_alive;
    // Unload classes and purge SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(&is_alive, false);
    ParallelCleaningTask unlink_task(&is_alive, true /* process_strings */, true /* process_symbols */, nworkers, purged_class);
    sh->workers()->run_task(&unlink_task);
    ClassLoaderDataGraph::purge();
  }

  // Mark finished. All recently allocated regions are not recent anymore.
  {
    ResetRecentlyAllocated cl;
    sh->heap_region_iterate(&cl);
  }

  policy->record_phase_end(full_gc ?
                           ShenandoahCollectorPolicy::full_gc_mark_class_unloading :
                           ShenandoahCollectorPolicy::class_unloading);

  assert(task_queues()->is_empty(), "Should be empty");
}

#ifdef ASSERT
template <class T>
void ShenandoahVerifyRootsClosure1::do_oop_work(T* p) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    if (! oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj))) {
      tty->print_cr("from-space marked: %s, to-space marked: %s, unload_classes: %s",
                    BOOL_TO_STR(heap->is_marked_next(obj)),
                    BOOL_TO_STR(heap->is_marked_next(ShenandoahBarrierSet::resolve_oop_static_not_null(obj))),
                    BOOL_TO_STR(heap->concurrentMark()->unload_classes()));
    }
    guarantee(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "oop must not be forwarded");
    guarantee(heap->is_marked_next(obj), "oop must be marked");
  }
}

void ShenandoahVerifyRootsClosure1::do_oop(oop* p) {
  do_oop_work(p);
}

void ShenandoahVerifyRootsClosure1::do_oop(narrowOop* p) {
  do_oop_work(p);
}

void ShenandoahConcurrentMark::verify_roots() {
  ShenandoahVerifyRootsClosure1 cl;
  CodeBlobToOopClosure blobsCl(&cl, false /* fix_relocations */);
  CLDToOopClosure cldCl(&cl);
  ClassLoaderDataGraph::clear_claimed_marks();
  ShenandoahRootProcessor rp(ShenandoahHeap::heap(), 1);
  rp.process_all_roots(&cl, &cl, &cldCl, &blobsCl, 0);
}
#endif

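// Claims each thread once via the claim parity and drains its local SATB
// buffer; the VM thread stands in for the shared SATB queue.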
class ShenandoahSATBThreadsClosure : public ThreadClosure {
  ShenandoahSATBBufferClosure* _satb_cl;
  int _thread_parity;

 public:
  ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
    _satb_cl(satb_cl),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;
        jt->satb_mark_queue().apply_closure_and_empty(_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
      }
    }
  }
};

void ShenandoahConcurrentMark::drain_satb_buffers(uint worker_id, bool remark) {
  SCMObjToScanQueue* q = get_queue(worker_id);
  ShenandoahSATBBufferClosure cl(q);

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  while (satb_mq_set.apply_closure_to_completed_buffer(&cl)) {}

  if (remark) {
    ShenandoahSATBThreadsClosure tc(&cl);
    Threads::threads_do(&tc);
  }
}

#if TASKQUEUE_STATS
void ShenandoahConcurrentMark::print_taskqueue_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ShenandoahConcurrentMark::print_taskqueue_stats() const {
  if (!log_develop_is_enabled(Trace, gc, task, stats)) {
    return;
  }
  Log(gc, task, stats) log;
  ResourceMark rm;
  outputStream* st = log.trace_stream();
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  const int n = _task_queues->size();
  for (int i = 0; i < n; ++i) {
    st->print(INT32_FORMAT_W(3), i);
    _task_queues->queue(i)->stats.print(st);
    st->cr();
    totals += _task_queues->queue(i)->stats;
  }
  st->print("tot "); totals.print(st); st->cr();
  DEBUG_ONLY(totals.verify());
}

void ShenandoahConcurrentMark::reset_taskqueue_stats() {
  const int n = task_queues()->size();
  for (int i = 0; i < n; ++i) {
    task_queues()->queue(i)->stats.reset();
  }
}
#endif // TASKQUEUE_STATS

// Weak Reference Closures
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ParallelTaskTerminator* _terminator;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id, ParallelTaskTerminator* t):
    _worker_id(worker_id),
    _terminator(t) {
  }

  void do_void() {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrentMark();
    ReferenceProcessor* rp;
    if (scm->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
    } else {
      rp = NULL;
    }

    scm->mark_loop(_worker_id, _terminator, rp,
                   false, // not cancellable
                   false, // do not drain SATBs
                   true,  // count liveness
                   scm->unload_classes(),
                   sh->need_update_refs());
  }
};

class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
  SCMObjToScanQueue* _queue;
  ShenandoahHeap* _heap;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE>(p, _heap, _queue);
  }

public:
  ShenandoahCMKeepAliveClosure(SCMObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
  SCMObjToScanQueue* _queue;
  ShenandoahHeap* _heap;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE>(p, _heap, _queue);
  }

public:
  ShenandoahCMKeepAliveUpdateClosure(SCMObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

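// Proxies a reference-processing ProcessTask into a gang task, choosing the
// keep-alive closure depending on whether references also need updating.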
class ShenandoahRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ParallelTaskTerminator* _terminator;

public:
  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             ParallelTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahForwardedIsAliveClosure is_alive;
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    if (heap->need_update_refs()) {
      ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahCMKeepAliveClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};

class ShenandoahRefEnqueueTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::EnqueueTask& _enqueue_task;

public:
  ShenandoahRefEnqueueTaskProxy(AbstractRefProcTaskExecutor::EnqueueTask& enqueue_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enqueue_task(enqueue_task) {
  }

  void work(uint worker_id) {
    _enqueue_task.work(worker_id);
  }
};

class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahRefProcTaskExecutor(WorkGang* workers) :
    _workers(workers) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task) {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* cm = heap->concurrentMark();
    uint nworkers = _workers->active_workers();
    cm->task_queues()->reserve(nworkers);
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, cm->task_queues());
      ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
      _workers->run_task(&proc_task_proxy);
    } else {
      ParallelTaskTerminator terminator(nworkers, cm->task_queues());
      ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
      _workers->run_task(&proc_task_proxy);
    }
  }

  void execute(EnqueueTask& task) {
    ShenandoahRefEnqueueTaskProxy enqueue_task_proxy(task);
    _workers->run_task(&enqueue_task_proxy);
  }
};

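// Weak reference processing, called from shared_finish_mark_from_roots() once
// marking is complete: sets up the soft-ref policy, then processes and
// enqueues the discovered references in parallel through the executor above.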
void ShenandoahConcurrentMark::weak_refs_work() {
  assert(process_references(), "sanity");
  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
  ReferenceProcessor* rp = sh->ref_processor();
  WorkGang* workers = sh->workers();
  uint nworkers = workers->active_workers();

  // Set up collector policy for softref cleaning.
  bool clear_soft_refs = sh->collector_policy()->use_should_clear_all_soft_refs(true /* bogus arg */);
  log_develop_debug(gc, ref)("clearing soft refs: %s", BOOL_TO_STR(clear_soft_refs));
  rp->setup_policy(clear_soft_refs);
  rp->set_active_mt_degree(nworkers);

  uint serial_worker_id = 0;
  ShenandoahForwardedIsAliveClosure is_alive;

  assert(task_queues()->is_empty(), "Should be empty");

  ParallelTaskTerminator terminator(1, task_queues());
  ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator);
  ShenandoahRefProcTaskExecutor executor(workers);

  log_develop_trace(gc, ref)("start processing references");

  if (sh->need_update_refs()) {
    ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      NULL);
  } else {
    ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      NULL);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  log_develop_trace(gc, ref)("finished processing references");
  log_develop_trace(gc, ref)("start enqueueing references");

  rp->enqueue_discovered_references(&executor);

  log_develop_trace(gc, ref)("finished enqueueing references");

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}

void ShenandoahConcurrentMark::cancel() {
  ShenandoahHeap* sh = ShenandoahHeap::heap();

  // Cancel weak-ref discovery.
  if (process_references()) {
    ReferenceProcessor* rp = sh->ref_processor();
    rp->abandon_partial_discovery();
    rp->disable_discovery();
  }

  // Clean up marking stacks.
  SCMObjToScanQueueSet* queues = task_queues();
  queues->clear();

  // Cancel SATB buffers.
  JavaThread::satb_mark_queue_set().abandon_partial_marking();
}

SCMObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %d", worker_id);
  return _task_queues->queue(worker_id);
}

void ShenandoahConcurrentMark::clear_queue(SCMObjToScanQueue* q) {
  q->set_empty();
  q->overflow_stack()->clear();
  q->clear_buffer();
}

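// Translates the runtime flags into the matching closure specialization and
// runs mark_loop_work; afterwards, folds the per-worker liveness counts back
// into the heap regions when liveness counting was requested.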
template <bool CANCELLABLE, bool DRAIN_SATB, bool COUNT_LIVENESS, bool CLASS_UNLOAD, bool UPDATE_REFS>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, ParallelTaskTerminator* t, ReferenceProcessor* rp) {
  SCMObjToScanQueue* q = get_queue(w);

  jushort* ld;
  if (COUNT_LIVENESS) {
    ld = get_liveness(w);
    Copy::fill_to_bytes(ld, _heap->max_regions() * sizeof(jushort));
  } else {
    ld = NULL;
  }

  // TODO: We can clean this up if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (CLASS_UNLOAD) {
    if (UPDATE_REFS) {
      ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
      mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
    } else {
      ShenandoahMarkRefsMetadataClosure cl(q, rp);
      mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
    }
  } else {
    if (UPDATE_REFS) {
      ShenandoahMarkUpdateRefsClosure cl(q, rp);
      mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
    } else {
      ShenandoahMarkRefsClosure cl(q, rp);
      mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
    }
  }

  if (COUNT_LIVENESS) {
    for (uint i = 0; i < _heap->max_regions(); i++) {
      ShenandoahHeapRegion* r = _heap->regions()->get(i);
      if (r != NULL) {
        jushort live = ld[i];
        if (live > 0) {
          r->increase_live_data_words(live);
        }
      }
    }
  }
}

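// The marking loop proper: first claims and drains any extra (unclaimed)
// queues, then alternates between the worker's own queue, SATB draining and
// work stealing, checking for cancellation/termination once per stride.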
template <class T, bool CANCELLABLE, bool DRAIN_SATB, bool COUNT_LIVENESS>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator* terminator) {
  int seed = 17;
  uint stride = CANCELLABLE ? ShenandoahMarkLoopStride : 1;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  SCMObjToScanQueueSet* queues = task_queues();
  SCMObjToScanQueue* q;
  SCMTask t;

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  q = queues->claim_next();
  while (q != NULL) {
    if (CANCELLABLE && heap->cancelled_concgc()) {
      ShenandoahCancelledTerminatorTerminator tt;
      while (!terminator->offer_termination(&tt));
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (try_queue(q, t)) {
        do_task<T, COUNT_LIVENESS>(q, cl, live_data, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  q = get_queue(worker_id);

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->cancelled_concgc()) {
      ShenandoahCancelledTerminatorTerminator tt;
      while (!terminator->offer_termination(&tt));
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (try_queue(q, t) ||
          (DRAIN_SATB && try_draining_satb_buffer(q, t)) ||
          queues->steal(worker_id, &seed, t)) {
        do_task<T, COUNT_LIVENESS>(q, cl, live_data, &t);
      } else {
        if (terminator->offer_termination()) return;
      }
    }
  }
}

void ShenandoahConcurrentMark::set_process_references(bool pr) {
  _process_references = pr;
}

bool ShenandoahConcurrentMark::process_references() const {
  return _process_references;
}

void ShenandoahConcurrentMark::set_unload_classes(bool uc) {
  _unload_classes = uc;
}

bool ShenandoahConcurrentMark::unload_classes() const {
  return _unload_classes;
}

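// Claims the code cache for scanning by exactly one worker per cycle, via a
// CAS from 0 to 1; only the winning thread sees a true return value.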
bool ShenandoahConcurrentMark::claim_codecache() {
  assert(ShenandoahConcurrentCodeRoots, "must not be called otherwise");
  jbyte old = Atomic::cmpxchg(1, &_claimed_codecache, 0);
  return old == 0;
}

void ShenandoahConcurrentMark::clear_claim_codecache() {
  assert(ShenandoahConcurrentCodeRoots, "must not be called otherwise");
  _claimed_codecache = 0;
}

jushort* ShenandoahConcurrentMark::get_liveness(uint worker_id) {
  return _liveness_local[worker_id];
}

// Generate Shenandoah specialized oop_oop_iterate functions.
SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_SHENANDOAH(ALL_KLASS_OOP_OOP_ITERATE_DEFN)