1 /*
   2  * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "classfile/stringTable.hpp"
  25 #include "gc/shared/gcTimer.hpp"
  26 #include "gc/shared/isGCActiveMark.hpp"
  27 #include "gc/shared/parallelCleaning.hpp"
  28 #include "gc/shared/strongRootsScope.hpp"
  29 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
  30 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  31 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  32 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  33 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  34 #include "gc/shenandoah/shenandoah_specialized_oop_closures.hpp"
  35 #include "gc/shenandoah/brooksPointer.hpp"
  36 #include "gc/shared/referenceProcessor.hpp"
  37 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
  38 #include "code/codeCache.hpp"
  39 #include "classfile/symbolTable.hpp"
  40 #include "classfile/systemDictionary.hpp"
  41 #include "memory/iterator.inline.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "gc/shared/taskqueue.inline.hpp"
  44 
  45 class ShenandoahInitMarkRootsClosure : public OopClosure {
  46   SCMObjToScanQueue* _queue;
  47   ShenandoahHeap* _heap;
  48 
  49 public:
  50   ShenandoahInitMarkRootsClosure(SCMObjToScanQueue* q) :
  51     _queue(q),
  52     _heap((ShenandoahHeap*) Universe::heap())
  53   {
  54   }
  55 
  56 private:
  57   template <class T>
  58   inline void do_oop_work(T* p) {
  59     T o = oopDesc::load_heap_oop(p);
  60     if (! oopDesc::is_null(o)) {
  61       oop obj = oopDesc::decode_heap_oop_not_null(o);
  62       obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
  63       assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
  64              "expect forwarded oop");
  65       ShenandoahConcurrentMark::mark_and_push(obj, _heap, _queue);
  66     }
  67   }
  68 
  69 public:
  70   void do_oop(narrowOop* p) {
  71     do_oop_work(p);
  72   }
  73 
  74   inline void do_oop(oop* p) {
  75     do_oop_work(p);
  76   }
  77 
  78 };
  79 
  80 class SCMUpdateRefsClosure: public OopClosure {
  81 private:
  82   ShenandoahHeap* _heap;
  83 public:
  84 
  85   SCMUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {
  86   }
  87 
  88 private:
  89   template <class T>
  90   inline void do_oop_work(T* p) {
  91     T o = oopDesc::load_heap_oop(p);
  92     if (! oopDesc::is_null(o)) {
  93       oop obj = oopDesc::decode_heap_oop_not_null(o);
  94       _heap->update_oop_ref_not_null(p, obj);
  95     }
  96   }
  97 
  98 public:
  99   inline void do_oop(oop* p) {
 100     do_oop_work(p);
 101   }
 102 
 103   void do_oop(narrowOop* p) {
 104     do_oop_work(p);
 105   }
 106 };
 107 
// Closure that marks objects and adds them to the queue to be scanned.
// Template parameters: T is the oop-visiting closure type used while
// scanning; CL selects whether per-region liveness data is accumulated.
template <class T, bool CL>
ShenandoahMarkObjsClosure<T, CL>::ShenandoahMarkObjsClosure(SCMObjToScanQueue* q, ReferenceProcessor* rp) :
  _heap((ShenandoahHeap*)(Universe::heap())),
  _queue(q),
  _mark_refs(T(q, rp)),
  _last_region_idx(0),
  _live_data(0)
{
}

template <class T, bool CL>
ShenandoahMarkObjsClosure<T, CL>::~ShenandoahMarkObjsClosure() {
  // When counting liveness (CL == true), flush the liveness accumulated for
  // the most recently visited region back into that region.
  if (CL) {
    ShenandoahHeapRegion *r = _heap->regions()->get(_last_region_idx);
    r->increase_live_data(_live_data);
  }
}
 126 
// Marking closure that also updates references as it goes (used when the
// cycle needs reference updating).
ShenandoahMarkUpdateRefsClosure::ShenandoahMarkUpdateRefsClosure(SCMObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataAwareOopClosure(rp),
  _queue(q),
  _heap((ShenandoahHeap*) Universe::heap())
{
}

// Plain marking closure (no reference updating).
ShenandoahMarkRefsClosure::ShenandoahMarkRefsClosure(SCMObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataAwareOopClosure(rp),
  _queue(q),
  _heap((ShenandoahHeap*) Universe::heap())
{
}
 140 
 141 class ShenandoahInitMarkRootsTask : public AbstractGangTask {
 142 private:
 143   ShenandoahRootProcessor* _rp;
 144   bool _process_refs;
 145 public:
 146   ShenandoahInitMarkRootsTask(ShenandoahRootProcessor* rp, bool process_refs) :
 147     AbstractGangTask("Shenandoah init mark roots task"),
 148     _rp(rp),
 149     _process_refs(process_refs) {
 150   }
 151 
 152   void work(uint worker_id) {
 153     assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
 154 
 155     ShenandoahHeap* heap = ShenandoahHeap::heap();
 156     SCMObjToScanQueueSet* queues = heap->concurrentMark()->task_queues();
 157     assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %d", worker_id);
 158 
 159     SCMObjToScanQueue* q = queues->queue(worker_id);
 160     ShenandoahInitMarkRootsClosure mark_cl(q);
 161     CLDToOopClosure cldCl(&mark_cl);
 162     MarkingCodeBlobClosure blobsCl(&mark_cl, ! CodeBlobToOopClosure::FixRelocations);
 163 
 164     ResourceMark m;
 165     if (heap->concurrentMark()->unload_classes()) {
 166       _rp->process_strong_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, &blobsCl, worker_id);
 167     } else {
 168       _rp->process_all_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, ShenandoahConcurrentCodeRoots ? NULL : &blobsCl, worker_id);
 169     }
 170   }
 171 };
 172 
 173 class ShenandoahUpdateRootsTask : public AbstractGangTask {
 174 private:
 175   ShenandoahRootProcessor* _rp;
 176 public:
 177   ShenandoahUpdateRootsTask(ShenandoahRootProcessor* rp) :
 178     AbstractGangTask("Shenandoah update roots task"),
 179     _rp(rp) {
 180   }
 181 
 182   void work(uint worker_id) {
 183     assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
 184 
 185     ShenandoahHeap* heap = ShenandoahHeap::heap();
 186     SCMUpdateRefsClosure cl;
 187     CLDToOopClosure cldCl(&cl);
 188 
 189     _rp->process_all_roots(&cl, &cl, &cldCl, NULL, worker_id);
 190   }
 191 };
 192 
// Gang task for the concurrent marking phase: each worker runs the
// concurrent marking loop over its queue until the terminator signals done.
class SCMConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ParallelTaskTerminator* _terminator;
  bool _update_refs;   // also update references while marking?

public:
  SCMConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs) :
    AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator), _update_refs(update_refs) {
  }


  void work(uint worker_id) {
    SCMObjToScanQueue* q = _cm->get_queue(worker_id);
    // The reference processor is only handed to the closures when this
    // cycle actually processes references.
    ReferenceProcessor* rp;
    if (_cm->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
    } else {
      rp = NULL;
    }
    // With concurrent code-root scanning, exactly one worker wins the
    // claim and scans the whole code cache here. Skipped when unloading
    // classes -- presumably code roots are treated elsewhere then; confirm.
    if (ShenandoahConcurrentCodeRoots && _cm->claim_codecache()) {
      if (! _cm->unload_classes()) {
        ShenandoahMarkRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, ! CodeBlobToOopClosure::FixRelocations);
        // blobs_do() requires the CodeCache_lock; no safepoint check since
        // we are running concurrently.
        MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        CodeCache::blobs_do(&blobs);
      }
    }
    // The closure type is a template parameter, so dispatch on the runtime
    // _update_refs flag by hand.
    if (_update_refs) {
      ShenandoahMarkObjsClosure<ShenandoahMarkUpdateRefsClosure, true> cl(q, rp);
      _cm->concurrent_mark_loop(&cl, worker_id, q, _terminator);
    } else {
      ShenandoahMarkObjsClosure<ShenandoahMarkRefsClosure, true> cl(q, rp);
      _cm->concurrent_mark_loop(&cl, worker_id, q,  _terminator);
    }
  }
};
 230 
// Gang task for the final-mark (remark) pause: drains the SATB buffers and
// then finishes marking from whatever remains on the queues.
class SCMFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ParallelTaskTerminator* _terminator;
  bool _update_refs;   // also update references while marking?
  bool _count_live;    // accumulate per-region liveness data?

public:
  SCMFinalMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs, bool count_live) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _update_refs(update_refs), _count_live(count_live) {
  }

  void work(uint worker_id) {
    // First drain remaining SATB buffers.
    // Notice that this is not strictly necessary for mark-compact. But since
    // it requires a StrongRootsScope around the task, we need to claim the
    // threads, and performance-wise it doesn't really matter. Adds about 1ms to
    // full-gc.
    _cm->drain_satb_buffers(worker_id, true);

    ReferenceProcessor* rp;
    if (_cm->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
    } else {
      rp = NULL;
    }
    SCMObjToScanQueue* q = _cm->get_queue(worker_id);

    // Templates need constexprs, so we have to switch by the flags ourselves.
    // Four-way dispatch over (_update_refs x _count_live); all branches run
    // the same final_mark_loop, only the closure instantiation differs.
    if (_update_refs) {
      if (_count_live) {
        ShenandoahMarkObjsClosure<ShenandoahMarkUpdateRefsClosure, true> cl(q, rp);
        _cm->final_mark_loop(&cl, worker_id, q, _terminator);
      } else {
        ShenandoahMarkObjsClosure<ShenandoahMarkUpdateRefsClosure, false> cl(q, rp);
        _cm->final_mark_loop(&cl, worker_id, q, _terminator);
      }
    } else {
      if (_count_live) {
        ShenandoahMarkObjsClosure<ShenandoahMarkRefsClosure, true> cl(q, rp);
        _cm->final_mark_loop(&cl, worker_id, q, _terminator);
      } else {
        ShenandoahMarkObjsClosure<ShenandoahMarkRefsClosure, false> cl(q, rp);
        _cm->final_mark_loop(&cl, worker_id, q, _terminator);
      }
    }

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};
 281 
// Scans all roots at the init-mark pause, seeding the task queues with the
// directly reachable objects.
void ShenandoahConcurrentMark::mark_roots() {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Reset CLD claims so every class loader data is visited by this pass.
  ClassLoaderDataGraph::clear_claimed_marks();

  uint nworkers = heap->max_parallel_workers();
  assert(nworkers <= task_queues()->size(), "Just check");

  ShenandoahRootProcessor root_proc(heap, nworkers, ShenandoahCollectorPolicy::scan_thread_roots);
  TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
  task_queues()->reserve(nworkers);
  assert(heap->workers()->active_workers() == nworkers, "Not expecting other tasks");

  ShenandoahInitMarkRootsTask mark_roots(&root_proc, process_references());
  heap->workers()->run_task(&mark_roots, nworkers);
  // Release the code cache claim so one of the concurrent marking workers
  // can claim and scan it later.
  if (ShenandoahConcurrentCodeRoots) {
    clear_claim_codecache();
  }
}
 304 
 305 void ShenandoahConcurrentMark::init_mark_roots() {
 306   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
 307   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
 308 
 309   ShenandoahHeap* heap = ShenandoahHeap::heap();
 310 
 311   // Set up ref processing and class unloading.
 312   ShenandoahCollectorPolicy* policy = heap->shenandoahPolicy();
 313   set_process_references(policy->process_references());
 314   set_unload_classes(policy->unload_classes());
 315 
 316   mark_roots();
 317 }
 318 
 319 void ShenandoahConcurrentMark::update_roots() {
 320   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
 321   ShenandoahHeap* heap = ShenandoahHeap::heap();
 322 
 323   ClassLoaderDataGraph::clear_claimed_marks();
 324   uint nworkers = heap->max_parallel_workers();
 325   assert(heap->workers()->active_workers() == nworkers, "Not expecting other tasks");
 326   ShenandoahRootProcessor root_proc(heap, nworkers, ShenandoahCollectorPolicy::update_thread_roots);
 327   ShenandoahUpdateRootsTask update_roots(&root_proc);
 328   heap->workers()->run_task(&update_roots);
 329 
 330 }
 331 
// Updates roots at the final-mark pause, bracketing the work with C2's
// derived pointer table maintenance so derived oops survive the update.
void ShenandoahConcurrentMark::final_update_roots() {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

  COMPILER2_PRESENT(DerivedPointerTable::clear());

  update_roots();

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}
 342 
 343 
 344 void ShenandoahConcurrentMark::initialize(uint workers) {
 345   uint num_queues = MAX2(workers, 1U);
 346 
 347   _task_queues = new SCMObjToScanQueueSet((int) num_queues);
 348 
 349   for (uint i = 0; i < num_queues; ++i) {
 350     SCMObjToScanQueue* task_queue = new SCMObjToScanQueue();
 351     task_queue->initialize();
 352     _task_queues->register_queue(i, task_queue);
 353   }
 354   _process_references = false;
 355   _unload_classes = false;
 356   _claimed_codecache = 0;
 357 
 358   JavaThread::satb_mark_queue_set().set_buffer_size(ShenandoahSATBBufferSize);
 359 }
 360 
// Concurrent marking phase: marks through the heap from the queues seeded
// at init-mark, running on the concurrent worker gang.
void ShenandoahConcurrentMark::mark_from_roots() {
  ShenandoahHeap* sh = (ShenandoahHeap *) Universe::heap();

  bool update_refs = sh->need_update_refs();

  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::conc_mark);

  // Concurrent marking, uses concurrent workers
  uint nworkers = sh->max_conc_workers();
  if (process_references()) {
    ReferenceProcessor* rp = sh->ref_processor();
    rp->set_active_mt_degree(nworkers);

    // enable ("weak") refs discovery
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(sh->is_full_gc_in_progress()); // snapshot the soft ref policy to be used in this cycle
  }

  task_queues()->reserve(nworkers);
  assert(sh->conc_workers()->active_workers() == nworkers, "Not expecting other tasks");

  // Run the marking task with the configured terminator flavor
  // (Shenandoah's own vs. the stock ParallelTaskTerminator).
  if (UseShenandoahOWST) {
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    SCMConcurrentMarkingTask markingTask = SCMConcurrentMarkingTask(this, &terminator, update_refs);
    sh->conc_workers()->run_task(&markingTask, nworkers);
  } else {
    ParallelTaskTerminator terminator(nworkers, task_queues());
    SCMConcurrentMarkingTask markingTask = SCMConcurrentMarkingTask(this, &terminator, update_refs);
    sh->conc_workers()->run_task(&markingTask, nworkers);
  }

  assert(task_queues()->is_empty(), "Should be empty");
  // Only report stats for cycles that ran to completion.
  if (! sh->cancelled_concgc()) {
    TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
  }

  TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());

  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::conc_mark);
}
 401 
// Final-mark pause for the concurrent cycle: finishes marking, then updates
// roots when the cycle also updates references.
void ShenandoahConcurrentMark::finish_mark_from_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

  IsGCActiveMark is_active;

  ShenandoahHeap* sh = (ShenandoahHeap *) Universe::heap();

  TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());

  // Shared with mark-compact; full_gc = false selects the concurrent-GC
  // phase accounting.
  shared_finish_mark_from_roots(/* full_gc = */ false);

  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::update_roots);
  if (sh->need_update_refs()) {
    final_update_roots();
  }
  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::update_roots);

  TASKQUEUE_STATS_ONLY(print_taskqueue_stats());

#ifdef ASSERT
  verify_roots();

  if (ShenandoahDumpHeapAfterConcurrentMark) {
    sh->ensure_parsability(false);
    sh->print_all_refs("post-mark");
  }
#endif
}
 430 
// Completes marking for both concurrent GC and mark-compact (full GC):
// drains queues/SATB, processes weak references, then unloads classes.
// full_gc only selects which policy phases are recorded.
void ShenandoahConcurrentMark::shared_finish_mark_from_roots(bool full_gc) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* sh = ShenandoahHeap::heap();
  ShenandoahCollectorPolicy* policy = sh->shenandoahPolicy();

  uint nworkers = sh->max_parallel_workers();
  // Finally mark everything else we've got in our queues during the previous steps.
  // It does two different things for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts out with the task queues seeded by initial
  //   root scan, and completes the closure, thus marking through all live objects
  // The implementation is the same, so it's shared here.
  {
    policy->record_phase_start(full_gc ?
                               ShenandoahCollectorPolicy::full_gc_mark_drain_queues :
                               ShenandoahCollectorPolicy::drain_satb);
    bool count_live = !(ShenandoahNoLivenessFullGC && full_gc); // we do not need liveness data for full GC
    task_queues()->reserve(nworkers);

    // StrongRootsScope is needed so SCMFinalMarkingTask can claim threads
    // while draining their SATB buffers.
    StrongRootsScope scope(nworkers);
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, task_queues());
      SCMFinalMarkingTask markingTask = SCMFinalMarkingTask(this, &terminator, sh->need_update_refs(), count_live);
      sh->workers()->run_task(&markingTask);
    } else {
      ParallelTaskTerminator terminator(nworkers, task_queues());
      SCMFinalMarkingTask markingTask = SCMFinalMarkingTask(this, &terminator, sh->need_update_refs(), count_live);
      sh->workers()->run_task(&markingTask);
    }
    policy->record_phase_end(full_gc ?
                             ShenandoahCollectorPolicy::full_gc_mark_drain_queues :
                             ShenandoahCollectorPolicy::drain_satb);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // When we're done marking everything, we process weak references.
  policy->record_phase_start(full_gc ?
                             ShenandoahCollectorPolicy::full_gc_mark_weakrefs :
                             ShenandoahCollectorPolicy::weakrefs);
  if (process_references()) {
    weak_refs_work();
  }
  policy->record_phase_end(full_gc ?
                           ShenandoahCollectorPolicy::full_gc_mark_weakrefs :
                           ShenandoahCollectorPolicy::weakrefs);

  // And finally finish class unloading
  policy->record_phase_start(full_gc ?
                             ShenandoahCollectorPolicy::full_gc_mark_class_unloading :
                             ShenandoahCollectorPolicy::class_unloading);
  if (unload_classes()) {
    ShenandoahForwardedIsAliveClosure is_alive;
    // Unload classes and purge SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(&is_alive, false);
    ParallelCleaningTask unlink_task(&is_alive, true, true, nworkers, purged_class);
    sh->workers()->run_task(&unlink_task, nworkers);
    ClassLoaderDataGraph::purge();
  }
  policy->record_phase_end(full_gc ?
                           ShenandoahCollectorPolicy::full_gc_mark_class_unloading :
                           ShenandoahCollectorPolicy::class_unloading);

  assert(task_queues()->is_empty(), "Should be empty");
}
 498 
 499 #ifdef ASSERT
// Debug-only check that a root oop is not forwarded and is marked in the
// current marking cycle.
template <class T>
void ShenandoahVerifyRootsClosure1::do_oop_work(T* p) {
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    // Dump some diagnostic state before the guarantee below fires.
    if (! oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj))) {
      tty->print_cr("from-space marked: %s, to-space marked: %s, unload_classes: %s", BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_current(obj)), BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_current(ShenandoahBarrierSet::resolve_oop_static_not_null(obj))), BOOL_TO_STR(ShenandoahHeap::heap()->concurrentMark()->unload_classes()));
    }
    guarantee(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "oop must not be forwarded");
    guarantee(ShenandoahHeap::heap()->is_marked_current(obj), "oop must be marked");
  }
}
 512 
// Both overloads dispatch to the templated implementation above.
void ShenandoahVerifyRootsClosure1::do_oop(oop* p) {
  do_oop_work(p);
}

void ShenandoahVerifyRootsClosure1::do_oop(narrowOop* p) {
  do_oop_work(p);
}
 520 
 521 void ShenandoahConcurrentMark::verify_roots() {
 522   ShenandoahVerifyRootsClosure1 cl;
 523   CodeBlobToOopClosure blobsCl(&cl, false);
 524   CLDToOopClosure cldCl(&cl);
 525   ClassLoaderDataGraph::clear_claimed_marks();
 526   ShenandoahRootProcessor rp(ShenandoahHeap::heap(), 1);
 527   rp.process_all_roots(&cl, &cl, &cldCl, &blobsCl, 0);
 528 
 529 }
 530 #endif
 531 
// Thread closure that flushes per-thread SATB buffers into the given SATB
// buffer closure. Threads are claimed via the claim parity so each buffer
// is drained exactly once per remark.
class ShenandoahSATBThreadsClosure : public ThreadClosure {
  ShenandoahSATBBufferClosure* _satb_cl;
  int _thread_parity;   // parity snapshot used for claiming threads

 public:
  ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
    _satb_cl(satb_cl),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;
        jt->satb_mark_queue().apply_closure_and_empty(_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      // The VM thread drains the shared SATB queue on behalf of non-Java
      // threads.
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
      }
    }
  }
};
 554 
 555 void ShenandoahConcurrentMark::drain_satb_buffers(uint worker_id, bool remark) {
 556   SCMObjToScanQueue* q = get_queue(worker_id);
 557   ShenandoahSATBBufferClosure cl(q);
 558 
 559   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
 560   while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
 561 
 562   if (remark) {
 563     ShenandoahSATBThreadsClosure tc(&cl);
 564     Threads::threads_do(&tc);
 565   }
 566 }
 567 
 568 #if TASKQUEUE_STATS
// Prints the two-line task queue statistics header.
void ShenandoahConcurrentMark::print_taskqueue_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}
 574 
// Prints per-worker task queue statistics plus a totals row to the
// gc+task+stats trace log; no-op unless that logging is enabled.
void ShenandoahConcurrentMark::print_taskqueue_stats() const {
  if (!log_develop_is_enabled(Trace, gc, task, stats)) {
    return;
  }
  Log(gc, task, stats) log;
  ResourceMark rm;
  outputStream* st = log.trace_stream();
  print_taskqueue_stats_hdr(st);

  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
  TaskQueueStats totals;
  const int n = sh->max_conc_workers();
  for (int i = 0; i < n; ++i) {
    st->print(INT32_FORMAT_W(3), i);
    _task_queues->queue(i)->stats.print(st);
    st->cr();
    totals += _task_queues->queue(i)->stats;
  }
  st->print("tot "); totals.print(st); st->cr();
  DEBUG_ONLY(totals.verify());

}
 597 
 598 void ShenandoahConcurrentMark::reset_taskqueue_stats() {
 599   ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
 600   const int n = sh->max_conc_workers();
 601   for (int i = 0; i < n; ++i) {
 602     _task_queues->queue(i)->stats.reset();
 603   }
 604 }
 605 #endif // TASKQUEUE_STATS
 606 
// Weak Reference Closures

// complete_gc closure for reference processing: drains the marking queues
// (via final_mark_loop) so objects kept alive by reference processing are
// fully marked through.
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ParallelTaskTerminator* _terminator;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id, ParallelTaskTerminator* t):
    _worker_id(worker_id),
    _terminator(t) {
  }


  void do_void() {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrentMark();
    ReferenceProcessor* rp;
    if (scm->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
    } else {
      rp = NULL;
    }
    SCMObjToScanQueue* q = scm->get_queue(_worker_id);
    // Closure type is a template parameter, so dispatch on need_update_refs
    // by hand (same pattern as the marking tasks above).
    if (sh->need_update_refs()) {
      ShenandoahMarkObjsClosure<ShenandoahMarkUpdateRefsClosure, true> cl(q, rp);
      scm->final_mark_loop(&cl, _worker_id, q, _terminator);
    } else {
      ShenandoahMarkObjsClosure<ShenandoahMarkRefsClosure, true> cl(q, rp);
      scm->final_mark_loop(&cl, _worker_id, q, _terminator);
    }
  }
};
 640 
 641 
// keep_alive closure for reference processing when references do not need
// updating: marks the referent and pushes it onto the queue for scanning.
class ShenandoahCMKeepAliveClosure: public OopClosure {
  SCMObjToScanQueue* _queue;
  ShenandoahHeap* _sh;

public:
  ShenandoahCMKeepAliveClosure(SCMObjToScanQueue* q) :
    _queue(q) {
    _sh = (ShenandoahHeap*) Universe::heap();
  }

private:
  template <class T>
  inline void do_oop_work(T* p) {

    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      assert(oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)), "only get updated oops in weak ref processing");

#ifdef ASSERT
      // Debug-only tracing of each kept-alive referent.
      if (log_is_enabled(Trace, gc, ref)) {
        ResourceMark rm;
        outputStream* out = Log(gc, ref)::trace_stream();
        out->print("\twe're looking at location "
                   "*"PTR_FORMAT" = "PTR_FORMAT,
                   p2i(p), p2i((void*) obj));
        obj->print_on(out);
      }
#endif
      ShenandoahConcurrentMark::mark_and_push(obj, _sh, _queue);
    }
  }

public:
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }


  void do_oop(oop* p) {
    do_oop_work(p);
  }

};
 686 
// keep_alive closure for reference processing when references also need
// updating: updates the reference to to-space first, then marks and pushes
// the referent.
class ShenandoahCMKeepAliveUpdateClosure: public OopClosure {
  SCMObjToScanQueue* _queue;
  ShenandoahHeap* _sh;

public:
  ShenandoahCMKeepAliveUpdateClosure(SCMObjToScanQueue* q) :
    _queue(q) {
    _sh = (ShenandoahHeap*) Universe::heap();
  }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      // Update the location to point at the to-space copy before marking.
      obj = _sh->update_oop_ref_not_null(p, obj);
      assert(oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)), "only get updated oops in weak ref processing");
#ifdef ASSERT
      // Debug-only tracing of each kept-alive referent.
      if (log_is_enabled(Trace, gc, ref)) {
        ResourceMark rm;
        outputStream* out = Log(gc, ref)::trace_stream();
        out->print("\twe're looking at location "
                   "*"PTR_FORMAT" = "PTR_FORMAT,
                   p2i(p), p2i((void*) obj));
        obj->print_on(out);
      }
#endif
      ShenandoahConcurrentMark::mark_and_push(obj, _sh, _queue);
    }
  }

public:
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }

  void do_oop(oop* p) {
    do_oop_work(p);
  }

};
 729 
// Gang task wrapper that runs a reference-processing ProcessTask on worker
// threads, supplying the Shenandoah-specific is_alive/keep_alive/complete_gc
// closures.
class ShenandoahRefProcTaskProxy : public AbstractGangTask {

private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ParallelTaskTerminator* _terminator;
public:

  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             ParallelTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahForwardedIsAliveClosure is_alive;
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    // Pick the keep_alive flavor matching whether this cycle updates refs.
    if (heap->need_update_refs()) {
      ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahCMKeepAliveClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};
 758 
 759 class ShenandoahRefEnqueueTaskProxy : public AbstractGangTask {
 760 
 761 private:
 762   AbstractRefProcTaskExecutor::EnqueueTask& _enqueue_task;
 763 
 764 public:
 765 
 766   ShenandoahRefEnqueueTaskProxy(AbstractRefProcTaskExecutor::EnqueueTask& enqueue_task) :
 767     AbstractGangTask("Enqueue reference objects in parallel"),
 768     _enqueue_task(enqueue_task) {
 769   }
 770 
 771   void work(uint worker_id) {
 772     _enqueue_task.work(worker_id);
 773   }
 774 };
 775 
// Executor handed to the ReferenceProcessor for multi-threaded reference
// processing; dispatches the process/enqueue tasks onto the worker gang.
class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {

private:
  WorkGang* _workers;

public:

  ShenandoahRefProcTaskExecutor() : _workers(ShenandoahHeap::heap()->workers()) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task) {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

    ShenandoahConcurrentMark* cm = ShenandoahHeap::heap()->concurrentMark();
    uint nworkers = _workers->active_workers();
    cm->task_queues()->reserve(nworkers);
    // Same terminator-flavor dispatch as in the marking phases.
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, cm->task_queues());
      ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
      _workers->run_task(&proc_task_proxy);
    } else {
      ParallelTaskTerminator terminator(nworkers, cm->task_queues());
      ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
      _workers->run_task(&proc_task_proxy);
    }
  }

  void execute(EnqueueTask& task) {
    ShenandoahRefEnqueueTaskProxy enqueue_task_proxy(task);
    _workers->run_task(&enqueue_task_proxy);
  }
};
 809 
 810 
// Processes and enqueues discovered weak references at final-mark. Runs the
// serial path through the closures below; the executor parallelizes when
// the reference processor decides to.
void ShenandoahConcurrentMark::weak_refs_work() {
  assert(process_references(), "sanity");
  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
  ReferenceProcessor* rp = sh->ref_processor();

  // Setup collector policy for softref cleaning.
  bool clear_soft_refs = sh->collector_policy()->use_should_clear_all_soft_refs(true /* bogus arg*/);
  log_develop_debug(gc, ref)("clearing soft refs: %s", BOOL_TO_STR(clear_soft_refs));
  rp->setup_policy(clear_soft_refs);
  rp->set_active_mt_degree(sh->max_parallel_workers());

  uint serial_worker_id = 0;
  ShenandoahForwardedIsAliveClosure is_alive;

  assert(task_queues()->is_empty(), "Should be empty");

  // Serial terminator/drain closure for the single-threaded path; the
  // executor builds its own when processing goes multi-threaded.
  ParallelTaskTerminator terminator(1, task_queues());
  ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator);
  ShenandoahRefProcTaskExecutor executor;

  log_develop_trace(gc, ref)("start processing references");

  // Same keep_alive flavor selection as ShenandoahRefProcTaskProxy::work().
  if (sh->need_update_refs()) {
    ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      NULL);
  } else {
    ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      NULL);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  log_develop_trace(gc, ref)("finished processing references");
  log_develop_trace(gc, ref)("start enqueuing references");

  rp->enqueue_discovered_references(&executor);

  log_develop_trace(gc, ref)("finished enqueueing references");

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}
 857 
 858 void ShenandoahConcurrentMark::cancel() {
 859   ShenandoahHeap* sh = ShenandoahHeap::heap();
 860 
 861   // Cancel weak-ref discovery.
 862   if (process_references()) {
 863     ReferenceProcessor* rp = sh->ref_processor();
 864     rp->abandon_partial_discovery();
 865     rp->disable_discovery();
 866   }
 867 
 868   // Clean up marking stacks.
 869   SCMObjToScanQueueSet* queues = task_queues();
 870   queues->clear();
 871 
 872   // Cancel SATB buffers.
 873   JavaThread::satb_mark_queue_set().abandon_partial_marking();
 874 }
 875 
 876 SCMObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
 877   assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %d", worker_id);
 878   return _task_queues->queue(worker_id);
 879 }
 880 
// Discards all work held by a single queue: the queue proper, its
// overflow stack, and any partially-filled internal buffer.
void ShenandoahConcurrentMark::clear_queue(SCMObjToScanQueue *q) {
  q->set_empty();
  q->overflow_stack()->clear();
  q->clear_buffer();
}
 886 
// Per-worker concurrent marking loop: first drains all queues once, then
// alternates between the local queue, SATB buffers, and stealing from other
// workers until global termination is reached or the GC is cancelled.
template <class T, bool CL>
void ShenandoahConcurrentMark::concurrent_mark_loop(ShenandoahMarkObjsClosure<T, CL>* cl,
                                                    uint worker_id,
                                                    SCMObjToScanQueue* q,
                                                    ParallelTaskTerminator* terminator) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  int seed = 17;                           // seed passed to try_to_steal's victim selection
  uint stride = ShenandoahMarkLoopStride;  // tasks attempted between cancellation checks
  SCMObjToScanQueueSet* queues = task_queues();
  bool                  done_queues = false;

  while (true) {
    if (heap->cancelled_concgc()) {
      // GC cancelled: discard the local queue and every remaining
      // unclaimed queue so all workers can reach termination.
      clear_queue(q);

      // Clear other queues for termination
      while ((q = queues->claim_next()) != NULL) {
        clear_queue(q);
      }

      // Spin until every worker has agreed to terminate.
      while (! terminator->offer_termination());
      return;
    }

    if (!done_queues) {
      // One-time initial pass: drain all queues before the steal phase.
      done_queues = true;
      if (!concurrent_process_queues(heap, q, cl)) {
        // concurrent GC cancelled
        continue;   // re-enter the loop to take the cancellation path above
      }
    }

    // Steady state: local queue first, then SATB buffers, then stealing;
    // only offer termination when all three come up empty.
    for (uint i = 0; i < stride; i++) {
      if (!try_queue(q, cl) &&
          !try_draining_an_satb_buffer(q) &&
          !try_to_steal(worker_id, cl, &seed)) {
        if (terminator->offer_termination()) return;
      }
    }
  }
}
 928 
 929 template <class T, bool CL>
 930 bool ShenandoahConcurrentMark::concurrent_process_queues(ShenandoahHeap* heap,
 931   SCMObjToScanQueue* q, ShenandoahMarkObjsClosure<T, CL>* cl) {
 932   SCMObjToScanQueueSet* queues = task_queues();
 933   uint stride = ShenandoahMarkLoopStride;
 934   while (true) {
 935     if (heap->cancelled_concgc()) return false;
 936 
 937     for (uint i = 0; i < stride; i++) {
 938       if (!try_queue(q, cl)) {
 939         assert(q->is_empty(), "Must be empty");
 940         q = queues->claim_next();
 941         if (q == NULL) {
 942           return true;
 943         }
 944       }
 945     }
 946   }
 947 }
 948 
 949 
 950 template <class T, bool CL>
 951 void ShenandoahConcurrentMark::final_mark_loop(ShenandoahMarkObjsClosure<T, CL>* cl,
 952                                                uint worker_id,
 953                                                SCMObjToScanQueue* q,
 954                                                ParallelTaskTerminator* terminator) {
 955   int seed = 17;
 956   while (true) {
 957     if (!try_queue(q, cl) &&
 958         !try_to_steal(worker_id, cl, &seed)) {
 959       if (terminator->offer_termination()) break;
 960     }
 961   }
 962 }
 963 
// Records whether this marking cycle should discover and process references.
void ShenandoahConcurrentMark::set_process_references(bool pr) {
  _process_references = pr;
}
 967 
// True if this marking cycle discovers and processes references.
bool ShenandoahConcurrentMark::process_references() const {
  return _process_references;
}
 971 
// Records whether this marking cycle should unload classes.
void ShenandoahConcurrentMark::set_unload_classes(bool uc) {
  _unload_classes = uc;
}
 975 
// True if this marking cycle unloads classes.
bool ShenandoahConcurrentMark::unload_classes() const {
  return _unload_classes;
}
 979 
 980 bool ShenandoahConcurrentMark::claim_codecache() {
 981   assert(ShenandoahConcurrentCodeRoots, "must not be called otherwise");
 982   jbyte old = Atomic::cmpxchg(1, &_claimed_codecache, 0);
 983   return old == 0;
 984 }
 985 
// Resets the code cache claim so it can be claimed again next cycle.
void ShenandoahConcurrentMark::clear_claim_codecache() {
  assert(ShenandoahConcurrentCodeRoots, "must not be called otherwise");
  _claimed_codecache = 0;
}
 990 
// Generate Shenandoah specialized oop_oop_iterate functions.
// NOTE(review): presumably instantiates the closure specializations declared
// in shenandoah_specialized_oop_closures.hpp (included above) — confirm.
SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_SHENANDOAH(ALL_KLASS_OOP_OOP_ITERATE_DEFN)