/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"

#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahTimingTracker.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"

#include "memory/iterator.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"

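// Root-scanning closure for init-mark: marks the object referenced from the
// root slot (updating the reference first, per UPDATE_REFS) and pushes it onto
// this worker's queue for further scanning.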
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataVisitingOopIterateClosure(rp),
  _queue(q),
  _heap(ShenandoahHeap::heap()),
  _mark_context(_heap->marking_context())
{ }

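// Init-mark root scan: each worker processes its share of the roots with the
// closure above, seeding the per-worker mark queues.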
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  bool _process_refs;
public:
  ShenandoahInitMarkRootsTask(ShenandoahRootProcessor* rp, bool process_refs) :
    AbstractGangTask("Shenandoah init mark roots task"),
    _rp(rp),
    _process_refs(process_refs) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahObjToScanQueueSet* queues = heap->concurrent_mark()->task_queues();
    assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %u", worker_id);

    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    ShenandoahInitMarkRootsClosure<UPDATE_REFS> mark_cl(q);
    do_work(heap, &mark_cl, worker_id);
  }

private:
  void do_work(ShenandoahHeap* heap, OopClosure* oops, uint worker_id) {
    // The rationale for selecting the roots to scan is as follows:
    //   a. With unload_classes = true, we only want to scan the actual strong roots from the
    //      code cache. This allows us to identify the dead classes, unload them, *and*
    //      invalidate the relevant code cache blobs. This can only be done together with
    //      class unloading.
    //   b. With unload_classes = false, we have to nominally retain all the references from the
    //      code cache, because there could be embedded classes/oops in the generated code,
    //      which we would never visit during mark. Without the code cache invalidation from (a),
    //      we risk executing such a code cache blob, and crashing.
    //   c. With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
    //      and instead do that in the concurrent phase under the relevant lock. This saves init mark
    //      pause time.

    CLDToOopClosure clds_cl(oops);
    MarkingCodeBlobClosure blobs_cl(oops, !CodeBlobToOopClosure::FixRelocations);

    ResourceMark m;
    if (heap->unload_classes()) {
      _rp->process_strong_roots(oops, &clds_cl, &blobs_cl, NULL, worker_id);
    } else {
      if (ShenandoahConcurrentScanCodeRoots) {
        CodeBlobClosure* code_blobs = NULL;
#ifdef ASSERT
        ShenandoahAssertToSpaceClosure assert_to_space_oops;
        CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
        // If concurrent code cache evacuation is disabled, the code cache should have only to-space ptrs.
        // Otherwise, it should have to-space ptrs only if mark does not update refs.
        if (!heap->has_forwarded_objects()) {
          code_blobs = &assert_to_space;
        }
#endif
        _rp->process_all_roots(oops, &clds_cl, code_blobs, NULL, worker_id);
      } else {
        _rp->process_all_roots(oops, &clds_cl, &blobs_cl, NULL, worker_id);
      }
    }
  }
};

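// Updates all roots to point to the to-space copies of their referents.
// Whether the code cache needs updating here depends on the GC phase; see
// update_roots() below for the per-phase decision.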
class ShenandoahUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  const bool _update_code_cache;
public:
  ShenandoahUpdateRootsTask(ShenandoahRootProcessor* rp, bool update_code_cache) :
    AbstractGangTask("Shenandoah update roots task"),
    _rp(rp),
    _update_code_cache(update_code_cache) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahUpdateRefsClosure cl;
    CLDToOopClosure cldCl(&cl);

    CodeBlobClosure* code_blobs;
    CodeBlobToOopClosure update_blobs(&cl, CodeBlobToOopClosure::FixRelocations);
#ifdef ASSERT
    ShenandoahAssertToSpaceClosure assert_to_space_oops;
    CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
#endif
    if (_update_code_cache) {
      code_blobs = &update_blobs;
    } else {
      code_blobs =
        DEBUG_ONLY(&assert_to_space)
        NOT_DEBUG(NULL);
    }
    _rp->update_all_roots<AlwaysTrueClosure>(&cl, &cldCl, code_blobs, NULL, worker_id);
  }
};

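// Concurrent marking: workers join the suspendible thread set (so they can
// yield at safepoints), optionally scan code roots, and then run the main
// marking loop until the terminator declares global completion.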
class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Concurrent Marking"), _cm(cm), _terminator(terminator) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    _cm->concurrent_scan_code_roots(worker_id, rp);
    _cm->mark_loop(worker_id, _terminator, rp,
                   true, // cancellable
                   ShenandoahStringDedup::is_enabled()); // perform string dedup
  }
};

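// Claims each thread once (via the claim parity) and drains its thread-local
// SATB buffer; the shared SATB queue is drained via the VM thread.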
class ShenandoahSATBThreadsClosure : public ThreadClosure {
private:
  ShenandoahSATBBufferClosure* _satb_cl;
  int _thread_parity;

public:
  ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
    _satb_cl(satb_cl),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;
        ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
      }
    }
  }
};

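// Final marking at a safepoint: drains the remaining SATB buffers, then
// finishes draining the mark queues (not cancellable at this point).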
class ShenandoahFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ShenandoahTaskTerminator* _terminator;
  bool _dedup_string;

public:
  ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator, bool dedup_string) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    ShenandoahParallelWorkerSession worker_session(worker_id);
    // First drain the remaining SATB buffers.
    // Notice that this is not strictly necessary for mark-compact. But since
    // it requires a StrongRootsScope around the task, we need to claim the
    // threads, and performance-wise it doesn't really matter: it adds about
    // 1 ms to a full GC.
    {
      ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
      ShenandoahSATBBufferClosure cl(q);
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
      ShenandoahSATBThreadsClosure tc(&cl);
      Threads::threads_do(&tc);
    }

    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    // A degenerated cycle may bypass the concurrent cycle, in which case code
    // roots have not been scanned yet; check here.
    _cm->concurrent_scan_code_roots(worker_id, rp);
    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   _dedup_string);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};

void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahGCPhase phase(root_phase);

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();

  assert(nworkers <= task_queues()->size(), "Cannot use more workers than task queues");

  ShenandoahRootProcessor root_proc(heap, nworkers, root_phase);
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
  task_queues()->reserve(nworkers);

  if (heap->has_forwarded_objects()) {
    ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc, heap->process_references());
    workers->run_task(&mark_roots);
  } else {
    // No need to update references, which means the heap is stable.
    // Can save time not walking through forwarding pointers.
    ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc, heap->process_references());
    workers->run_task(&mark_roots);
  }

  if (ShenandoahConcurrentScanCodeRoots) {
    clear_claim_codecache();
  }
}

void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  bool update_code_cache = true; // initialize to safer value
  switch (root_phase) {
    case ShenandoahPhaseTimings::update_roots:
    case ShenandoahPhaseTimings::final_update_refs_roots:
      update_code_cache = false;
      break;
    case ShenandoahPhaseTimings::full_gc_roots:
    case ShenandoahPhaseTimings::degen_gc_update_roots:
      update_code_cache = true;
      break;
    default:
      ShouldNotReachHere();
  }

  ShenandoahGCPhase phase(root_phase);

#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif

  uint nworkers = _heap->workers()->active_workers();

  ShenandoahRootProcessor root_proc(_heap, nworkers, root_phase);
  ShenandoahUpdateRootsTask update_roots(&root_proc, update_code_cache);
  _heap->workers()->run_task(&update_roots);

#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

void ShenandoahConcurrentMark::initialize(uint workers) {
  _heap = ShenandoahHeap::heap();

  uint num_queues = MAX2(workers, 1U);

  _task_queues = new ShenandoahObjToScanQueueSet((int) num_queues);

  for (uint i = 0; i < num_queues; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }

  ShenandoahBarrierSet::satb_mark_queue_set().set_buffer_size(ShenandoahSATBBufferSize);
}

void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) {
  if (ShenandoahConcurrentScanCodeRoots && claim_codecache()) {
    ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
    if (!_heap->unload_classes()) {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      // TODO: We cannot honor StringDeduplication here, due to lock ranking
      // inversion. So, we may miss some deduplication candidates.
      if (_heap->has_forwarded_objects()) {
        ShenandoahMarkResolveRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      }
    }
  }
}

void ShenandoahConcurrentMark::mark_from_roots() {
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);

  if (_heap->process_references()) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->set_active_mt_degree(nworkers);

    // enable ("weak") refs discovery
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

  task_queues()->reserve(nworkers);

  {
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::conc_termination);
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentMarkingTask task(this, &terminator);
    workers->run_task(&task);
  }

  assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled");
}

void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  uint nworkers = _heap->workers()->active_workers();

  // Finally mark everything else we've got in our queues during the previous steps.
  // This does two different things for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts out with the task queues seeded by initial
  //   root scan, and completes the closure, thus marking through all live objects.
  // The implementation is the same, so it's shared here.
  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_mark_finish_queues :
                            ShenandoahPhaseTimings::finish_queues);
    task_queues()->reserve(nworkers);

    shenandoah_assert_rp_isalive_not_installed();
    ShenandoahIsAliveSelector is_alive;
    ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

    ShenandoahTerminationTracker termination_tracker(full_gc ?
                                                     ShenandoahPhaseTimings::full_gc_mark_termination :
                                                     ShenandoahPhaseTimings::termination);

    StrongRootsScope scope(nworkers);
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
    _heap->workers()->run_task(&task);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // When we're done marking everything, we process weak references.
  if (_heap->process_references()) {
    weak_refs_work(full_gc);
  }

  weak_roots_work();

  // And finally finish class unloading
  if (_heap->unload_classes()) {
    _heap->unload_classes_and_cleanup_tables(full_gc);
  }
  if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahIsAliveSelector alive;
    BoolObjectClosure* is_alive = alive.is_alive_closure();
    ShenandoahStringDedup::unlink_or_oops_do(is_alive, NULL, false);
  }
  assert(task_queues()->is_empty(), "Should be empty");
  TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());

  // Resize Metaspace
  MetaspaceGC::compute_new_size();
}

// Weak Reference Closures
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ShenandoahTaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ReferenceProcessor* rp = sh->ref_processor();

    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(_worker_id, _terminator, rp,
                   false,   // not cancellable
                   false);  // do not do strdedup

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

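// Keep-alive closure for reference processing when the heap is stable:
// marks and enqueues the referent, without touching the reference slot.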
class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

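// Same as above, but for use when the heap has forwarded objects: the slot is
// also updated to the to-space copy (SIMPLE update mode).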
class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahWeakUpdateClosure : public OopClosure {
private:
  ShenandoahHeap* const _heap;

  template <class T>
  inline void do_oop_work(T* p) {
    oop o = _heap->maybe_update_with_forwarded(p);
    shenandoah_assert_marked_except(p, o, o == NULL);
  }

public:
  ShenandoahWeakUpdateClosure() : _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahWeakAssertNotForwardedClosure : public OopClosure {
private:
  template <class T>
  inline void do_oop_work(T* p) {
#ifdef ASSERT
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      shenandoah_assert_not_forwarded(p, obj);
    }
#endif
  }

public:
  ShenandoahWeakAssertNotForwardedClosure() {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

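// Proxy that runs a ReferenceProcessor ProcessTask on the worker gang, wiring
// in the Shenandoah-specific is-alive, keep-alive and drain closures.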
class ShenandoahRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             ShenandoahTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    if (heap->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};

class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahRefProcTaskExecutor(WorkGang* workers) :
    _workers(workers) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task, uint ergo_workers) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* cm = heap->concurrent_mark();
    ShenandoahPushWorkerQueuesScope scope(_workers, cm->task_queues(),
                                          ergo_workers,
                                          /* do_check = */ false);
    uint nworkers = _workers->active_workers();
    cm->task_queues()->reserve(nworkers);
    ShenandoahTaskTerminator terminator(nworkers, cm->task_queues());
    ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
    _workers->run_task(&proc_task_proxy);
  }
};

void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) {
  assert(_heap->process_references(), "sanity");

  ShenandoahPhaseTimings::Phase phase_root =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs :
          ShenandoahPhaseTimings::weakrefs;

  ShenandoahGCPhase phase(phase_root);

  ReferenceProcessor* rp = _heap->ref_processor();

  // NOTE: We cannot shortcut on has_discovered_references() here, because
  // we would then miss marking JNI Weak refs; see the implementation in
  // ReferenceProcessor::process_discovered_references.
  weak_refs_work_doit(full_gc);

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}

// Process leftover weak oops: update them if needed, or assert they do not
// need updating otherwise.
// The weak processor API requires us to visit the oops even if we are not
// doing anything to them.
void ShenandoahConcurrentMark::weak_roots_work() {
  ShenandoahIsAliveSelector is_alive;

  if (_heap->has_forwarded_objects()) {
    ShenandoahWeakUpdateClosure cl;
    WeakProcessor::weak_oops_do(is_alive.is_alive_closure(), &cl);
  } else {
    ShenandoahWeakAssertNotForwardedClosure cl;
    WeakProcessor::weak_oops_do(is_alive.is_alive_closure(), &cl);
  }
}

void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
  ReferenceProcessor* rp = _heap->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_process :
          ShenandoahPhaseTimings::weakrefs_process;

  ShenandoahPhaseTimings::Phase phase_process_termination =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_termination :
          ShenandoahPhaseTimings::weakrefs_termination;

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // The complete_gc and keep_alive closures instantiated here are only needed for
  // the single-threaded path in RP. They share queue 0 for tracking work, which
  // simplifies implementation. Since RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ShenandoahTaskTerminator terminator(1, task_queues());
  ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);

  ShenandoahRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());

  {
    ShenandoahGCPhase phase(phase_process);
    ShenandoahTerminationTracker phase_term(phase_process_termination);

    if (_heap->has_forwarded_objects()) {
      ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
                                        &complete_gc, &executor,
                                        &pt);
    } else {
      ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
                                        &complete_gc, &executor,
                                        &pt);
    }

    pt.print_all_references();

    assert(task_queues()->is_empty(), "Should be empty");
  }
}

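// Yield closure for precleaning: returning true aborts the preclean pass as
// soon as the GC has been cancelled.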
class ShenandoahCancelledGCYieldClosure : public YieldClosure {
private:
  ShenandoahHeap* const _heap;
public:
  ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {}
  virtual bool should_return() { return _heap->cancelled_gc(); }
};

class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ShenandoahTaskTerminator terminator(1, scm->task_queues());

    ReferenceProcessor* rp = sh->ref_processor();
    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(0, &terminator, rp,
                   false, // not cancellable
                   false); // do not do strdedup
  }
};

class ShenandoahPrecleanKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, CONCURRENT, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahPrecleanKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahPrecleanTask : public AbstractGangTask {
private:
  ReferenceProcessor* _rp;

public:
  ShenandoahPrecleanTask(ReferenceProcessor* rp) :
          AbstractGangTask("Precleaning task"),
          _rp(rp) {}

  void work(uint worker_id) {
    assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* sh = ShenandoahHeap::heap();

    ShenandoahObjToScanQueue* q = sh->concurrent_mark()->get_queue(worker_id);

    ShenandoahCancelledGCYieldClosure yield;
    ShenandoahPrecleanCompleteGCClosure complete_gc;

    if (sh->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahPrecleanKeepAliveUpdateClosure keep_alive(q);
      ResourceMark rm;
      _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &yield,
                                          NULL);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(q);
      ResourceMark rm;
      _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &yield,
                                          NULL);
    }
  }
};

void ShenandoahConcurrentMark::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references whose referents
  // are alive. Note that ReferenceProcessor already filters these out on reference
  // discovery, and the bulk of the work is done there. This phase processes leftovers
  // that missed the initial filtering, i.e. when the referent was marked alive after
  // the reference was discovered by RP.

  assert(_heap->process_references(), "sanity");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  assert(task_queues()->is_empty(), "Should be empty");

  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahPrecleanTask task(rp);
  workers->run_task(&task);

  assert(task_queues()->is_empty(), "Should be empty");
}

void ShenandoahConcurrentMark::cancel() {
  // Clean up marking stacks.
  ShenandoahObjToScanQueueSet* queues = task_queues();
  queues->clear();

  // Cancel SATB buffers.
  ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
}

ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %u", worker_id);
  return _task_queues->queue(worker_id);
}

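// Selects the marking closure that matches the current heap state: class
// unloading decides whether metadata is followed, has_forwarded_objects()
// decides whether references are updated while marking, and strdedup decides
// whether String candidates are enqueued for deduplication.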
template <bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, ShenandoahTaskTerminator *t, ReferenceProcessor *rp,
                                                 bool strdedup) {
  ShenandoahObjToScanQueue* q = get_queue(w);

  jushort* ld = _heap->get_liveness_cache(w);

  // TODO: We can clean this up if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (_heap->unload_classes()) {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  } else {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  }

  _heap->flush_liveness_cache(w);
}

template <class T, bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator *terminator) {
  int seed = 17;
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != NULL) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T>(q, cl, live_data, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }
  q = get_queue(worker_id);

  ShenandoahSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t) ||
          queues->steal(worker_id, &seed, t)) {
        do_task<T>(q, cl, live_data, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
      // Need to leave the STS here otherwise it might block safepoints.
      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}

bool ShenandoahConcurrentMark::claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  return _claimed_codecache.try_set();
}

void ShenandoahConcurrentMark::clear_claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  _claimed_codecache.unset();
}