/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/oop.inline.hpp"

// Mark the object and add it to the queue to be scanned
ShenandoahMarkObjsClosure::ShenandoahMarkObjsClosure(SCMObjToScanQueue* q, bool update_refs) :
  _heap((ShenandoahHeap*)(Universe::heap())),
  _mark_refs(ShenandoahMarkRefsClosure(q, update_refs)),
  _live_data(NEW_C_HEAP_ARRAY(size_t, _heap->max_regions(), mtGC))
{
  Copy::zero_to_bytes(_live_data, _heap->max_regions() * sizeof(size_t));
}

ShenandoahMarkObjsClosure::~ShenandoahMarkObjsClosure() {
  // Merge the local liveness data back into the actual regions.

  // We need to lock the heap here, to avoid racing with concurrent growth of the heap.
  MutexLockerEx ml(ShenandoahHeap_lock, true);
  ShenandoahHeapRegion** regions = _heap->heap_regions();
  for (uint i = 0; i < _heap->num_regions(); i++) {
    regions[i]->increase_live_data(_live_data[i]);
  }
  FREE_C_HEAP_ARRAY(size_t, _live_data);
}

ShenandoahMarkRefsClosure::ShenandoahMarkRefsClosure(SCMObjToScanQueue* q, bool update_refs) :
  MetadataAwareOopClosure(((ShenandoahHeap *) Universe::heap())->ref_processor()),
  _queue(q),
  _heap((ShenandoahHeap*) Universe::heap()),
  _scm(_heap->concurrentMark()),
  _update_refs(update_refs)
{
}

void ShenandoahMarkRefsClosure::do_oop(narrowOop* p) {
  Unimplemented();
}

// Applies a CLD closure only to class loader data that is still alive,
// as determined by ShenandoahIsAliveClosure.

class CLDMarkAliveClosure : public CLDClosure {
private:
  CLDClosure* _cl;
public:
  CLDMarkAliveClosure(CLDClosure* cl) : _cl(cl) {
  }
  void do_cld(ClassLoaderData* cld) {
    ShenandoahIsAliveClosure is_alive;
    if (cld->is_alive(&is_alive)) {
      _cl->do_cld(cld);
    }
  }
};

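// Parallel task that marks (and optionally updates) the root set. Each worker
// scans its share of the roots with a ShenandoahMarkRefsClosure, which pushes
// newly marked objects onto that worker's scan queue.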
class ShenandoahMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  bool _update_refs;
public:
  ShenandoahMarkRootsTask(ShenandoahRootProcessor* rp, bool update_refs) :
    AbstractGangTask("Shenandoah mark roots task"),
    _rp(rp), _update_refs(update_refs) {
  }

  void work(uint worker_id) {
    // tty->print_cr("start mark roots worker: "INT32_FORMAT, worker_id);
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    SCMObjToScanQueue* q = heap->concurrentMark()->get_queue(worker_id);
    ShenandoahMarkRefsClosure cl(q, _update_refs);

    CodeBlobToOopClosure blobsCl(&cl, true);
    CLDToOopClosure cldCl(&cl);

    ResourceMark m;
    if (ShenandoahProcessReferences && ClassUnloadingWithConcurrentMark) {
      _rp->process_strong_roots(&cl, &cldCl, &blobsCl);
    } else {
      _rp->process_all_roots(&cl, &cldCl, &blobsCl);
    }
    // tty->print_cr("finish mark roots worker: "INT32_FORMAT, worker_id);
  }
};

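// Parallel marking task: each worker repeatedly pops work from its own queue,
// drains SATB buffers, and steals from other queues, until the terminator
// agrees that all work is done (or the concurrent GC has been cancelled).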
class SCMConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ParallelTaskTerminator* _terminator;
  int _seed;
  bool _update_refs;

public:
  SCMConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs) :
    AbstractGangTask("Shenandoah concurrent marking task"),
    _cm(cm), _terminator(terminator), _seed(17), _update_refs(update_refs) {
  }

  void work(uint worker_id) {
    SCMObjToScanQueue* q = _cm->get_queue(worker_id);
    ShenandoahMarkObjsClosure cl(q, _update_refs);
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (true) {
      if (heap->cancelled_concgc() ||
          (!_cm->try_queue(q, &cl) &&
           !_cm->try_draining_an_satb_buffer(worker_id) &&
           !_cm->try_to_steal(worker_id, &cl, &_seed))) {
        if (_terminator->offer_termination()) break;
      }
    }
    if (ShenandoahTracePhases && heap->cancelled_concgc()) {
      tty->print_cr("Cancelled concurrent marking");
    }
  }
};

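// Scan the root set and push any unmarked root objects onto the marking
// queues, optionally updating the root references. When references are being
// updated, the derived pointer table is cleared and rebuilt around the scan.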
void ShenandoahConcurrentMark::prepare_unmarked_root_objs() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  bool update_refs = heap->need_update_refs();

  if (update_refs) {
    COMPILER2_PRESENT(DerivedPointerTable::clear());
  }

  prepare_unmarked_root_objs_no_derived_ptrs(update_refs);

  if (update_refs) {
    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  }
}

void ShenandoahConcurrentMark::prepare_unmarked_root_objs_no_derived_ptrs(bool update_refs) {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (ShenandoahParallelRootScan) {
    ClassLoaderDataGraph::clear_claimed_marks();
    heap->conc_workers()->set_active_workers(_max_conc_worker_id);
    ShenandoahRootProcessor root_proc(heap, _max_conc_worker_id);
    TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
    ShenandoahMarkRootsTask mark_roots(&root_proc, update_refs);
    heap->conc_workers()->run_task(&mark_roots);

    // Mark through any class loaders that have been found alive.
    ShenandoahMarkRefsClosure cl(get_queue(0), update_refs);
    CLDToOopClosure cldCl(&cl);
    CLDMarkAliveClosure cld_keep_alive(&cldCl);
    ClassLoaderDataGraph::roots_cld_do(NULL, &cld_keep_alive);
  } else {
    ShenandoahMarkRefsClosure cl(get_queue(0), update_refs);
    heap->roots_iterate(&cl);
  }

  if (!(ShenandoahProcessReferences && ClassUnloadingWithConcurrentMark)) {
    ShenandoahMarkRefsClosure cl(get_queue(0), update_refs);
    heap->weak_roots_iterate(&cl);
  }

  // tty->print_cr("all root marker threads done");
}

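// Set up one object-to-scan queue per concurrent GC worker (ConcGCThreads,
// at least one) and configure the SATB buffer size.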
void ShenandoahConcurrentMark::initialize() {
  _max_conc_worker_id = MAX2((uint) ConcGCThreads, 1U);
  _task_queues = new SCMObjToScanQueueSet((int) _max_conc_worker_id);

  for (uint i = 0; i < _max_conc_worker_id; ++i) {
    SCMObjToScanQueue* task_queue = new SCMObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }
  JavaThread::satb_mark_queue_set().set_buffer_size(1024 /* G1SATBBufferSize */);
}

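// Concurrent marking phase: enable reference discovery if requested, then run
// the parallel marking task over the concurrent worker gang until all queues
// are drained.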
void ShenandoahConcurrentMark::mark_from_roots() {
  if (ShenandoahGCVerbose) {
    tty->print_cr("STOPPING THE WORLD: before marking");
    tty->print_cr("Starting markFromRoots");
  }

  ShenandoahHeap* sh = (ShenandoahHeap *) Universe::heap();

  bool update_refs = sh->need_update_refs();

  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::conc_mark);
  ParallelTaskTerminator terminator(_max_conc_worker_id, _task_queues);

  if (ShenandoahProcessReferences) {
    ReferenceProcessor* rp = sh->ref_processor();
    // enable ("weak") refs discovery
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
  }

  SCMConcurrentMarkingTask markingTask = SCMConcurrentMarkingTask(this, &terminator, update_refs);
  sh->conc_workers()->set_active_workers(_max_conc_worker_id);
  sh->conc_workers()->run_task(&markingTask);

  if (ShenandoahGCVerbose) {
    tty->print("total workers = %u active workers = %u\n",
               sh->conc_workers()->total_workers(),
               sh->conc_workers()->active_workers());
    TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
    TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
  }

  if (ShenandoahGCVerbose) {
    tty->print_cr("Finishing markFromRoots");
    tty->print_cr("RESUMING THE WORLD: after marking");
  }

  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::conc_mark);
}

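// Parallel task that drains the remaining SATB buffers, including the
// per-thread buffers, during final marking.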
class FinishDrainSATBBuffersTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ParallelTaskTerminator* _terminator;
public:
  FinishDrainSATBBuffersTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator) :
    AbstractGangTask("Finish draining SATB buffers"), _cm(cm), _terminator(terminator) {
  }

  void work(uint worker_id) {
    _cm->drain_satb_buffers(worker_id, true);
  }
};

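// Updates references in place, without marking through them. Used below to
// fix up alive weak references held in JNI weak handles after marking.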
class ShenandoahUpdateAliveRefs : public OopClosure {
private:
  ShenandoahHeap* _heap;
public:
  ShenandoahUpdateAliveRefs() : _heap(ShenandoahHeap::heap()) {
  }
  virtual void do_oop(oop* p) {
    _heap->maybe_update_oop_ref(p);
  }

  virtual void do_oop(narrowOop* p) {
    Unimplemented();
  }
};

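// Finish marking: re-scan the roots, drain the SATB buffers, drain the
// remaining marking queues, then process weak references and unload classes
// if requested.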
void ShenandoahConcurrentMark::finish_mark_from_roots() {
  if (ShenandoahGCVerbose) {
    tty->print_cr("Starting finishMarkFromRoots");
  }

  IsGCActiveMark is_active;

  ShenandoahHeap* sh = (ShenandoahHeap *) Universe::heap();

  // Trace any (new) unmarked root references.
  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::rescan_roots);
  prepare_unmarked_root_objs();
  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::rescan_roots);

  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::drain_satb);
  {
    StrongRootsScope scope(_max_conc_worker_id);
    ParallelTaskTerminator terminator(_max_conc_worker_id, _task_queues);
    // drain_satb_buffers(0, true);
    FinishDrainSATBBuffersTask drain_satb_buffers(this, &terminator);
    sh->conc_workers()->set_active_workers(_max_conc_worker_id);
    sh->conc_workers()->run_task(&drain_satb_buffers);
    sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::drain_satb);
  }

  // Finally mark everything else we've got in our queues during the previous steps.
  {
    sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::drain_queues);
    ParallelTaskTerminator terminator(_max_conc_worker_id, _task_queues);
    SCMConcurrentMarkingTask markingTask = SCMConcurrentMarkingTask(this, &terminator, sh->need_update_refs());
    sh->conc_workers()->set_active_workers(_max_conc_worker_id);
    sh->conc_workers()->run_task(&markingTask);
    sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::drain_queues);
  }

#ifdef ASSERT
  for (int i = 0; i < (int) _max_conc_worker_id; i++) {
    assert(_task_queues->queue(i)->is_empty(), "Should be empty");
  }
#endif

  // When we're done marking everything, we process weak references.
  if (ShenandoahProcessReferences) {
    sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::weakrefs);
    weak_refs_work();
    sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::weakrefs);
  }

#ifdef ASSERT
  for (int i = 0; i < (int) _max_conc_worker_id; i++) {
    assert(_task_queues->queue(i)->is_empty(), "Should be empty");
  }
#endif

  if (ShenandoahGCVerbose) {
    tty->print_cr("Finishing finishMarkFromRoots");
#ifdef SLOWDEBUG
    for (int i = 0; i < (int) _max_conc_worker_id; i++) {
      tty->print("Queue: " INT32_FORMAT ":", i);
      _task_queues->queue(i)->stats.print(tty, 10);
      tty->cr();
      _task_queues->queue(i)->stats.verify();
    }
#endif
  }

  // We still need to update (without marking) alive refs in JNI handles.
  if (ShenandoahProcessReferences && ClassUnloadingWithConcurrentMark) {
    ShenandoahUpdateAliveRefs cl;
    ShenandoahIsAliveClosure is_alive;
    JNIHandles::weak_oops_do(&is_alive, &cl);
  }

#ifdef ASSERT
  verify_roots();

  if (ShenandoahDumpHeapAfterConcurrentMark) {
    sh->ensure_parsability(false);
    sh->print_all_refs("post-mark");
  }
#endif
}

#ifdef ASSERT
void ShenandoahVerifyRootsClosure1::do_oop(oop* p) {
  oop obj = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(obj)) {
    guarantee(ShenandoahHeap::heap()->is_marked_current(obj), "oop must be marked");
    guarantee(obj == ShenandoahBarrierSet::resolve_oop_static_not_null(obj), "oop must not be forwarded");
  }
}

void ShenandoahConcurrentMark::verify_roots() {
  ShenandoahVerifyRootsClosure1 cl;
  CodeBlobToOopClosure blobsCl(&cl, true);
  CLDToOopClosure cldCl(&cl);
  ClassLoaderDataGraph::clear_claimed_marks();
  ShenandoahRootProcessor rp(ShenandoahHeap::heap(), 1);
  rp.process_roots(&cl, &cl, &cldCl, &cldCl, &cldCl, &blobsCl);
}
#endif

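// Processes a raw SATB buffer: resolves each recorded oop to its forwardee,
// marks it, and pushes newly marked objects onto the given scan queue.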
class ShenandoahSATBBufferClosure : public SATBBufferClosure {
private:
  SCMObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
public:
  ShenandoahSATBBufferClosure(SCMObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap())
  {
  }

  void do_buffer(void** buffer, size_t size) {
    // tty->print_cr("draining one satb buffer");
    for (size_t i = 0; i < size; ++i) {
      void* entry = buffer[i];
      oop obj = oop(entry);
      // tty->print_cr("satb buffer entry: "PTR_FORMAT, p2i((HeapWord*) obj));
      if (!oopDesc::is_null(obj)) {
        obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        if (_heap->mark_current(obj)) {
          bool pushed = _queue->push(obj);
          assert(pushed, "overflow queue should always succeed pushing");
        }
      }
    }
  }
};

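// Claims each Java thread (and the VM thread) once per marking round and
// applies the SATB buffer closure to its SATB queue.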
class ShenandoahSATBThreadsClosure : public ThreadClosure {
  ShenandoahSATBBufferClosure* _satb_cl;
  int _thread_parity;

 public:
  ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
    _satb_cl(satb_cl),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;
        jt->satb_mark_queue().apply_closure_and_empty(_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
      }
    }
  }
};

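// Drain all completed SATB buffers into this worker's queue. When remark is
// true, the active per-thread buffers are drained as well.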
void ShenandoahConcurrentMark::drain_satb_buffers(uint worker_id, bool remark) {
  // tty->print_cr("start draining SATB buffers");

  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
  SCMObjToScanQueue* q = get_queue(worker_id);
  ShenandoahSATBBufferClosure cl(q);

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  while (satb_mq_set.apply_closure_to_completed_buffer(&cl));

  if (remark) {
    ShenandoahSATBThreadsClosure tc(&cl);
    Threads::threads_do(&tc);
  }

  // tty->print_cr("end draining SATB buffers");
}

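// Drain at most one completed SATB buffer into this worker's queue.
// Returns true if a buffer was processed.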
bool ShenandoahConcurrentMark::drain_one_satb_buffer(uint worker_id) {
  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
  SCMObjToScanQueue* q = get_queue(worker_id);
  ShenandoahSATBBufferClosure cl(q);

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  bool result = satb_mq_set.apply_closure_to_completed_buffer(&cl);
  return result;
}

#if TASKQUEUE_STATS
void ShenandoahConcurrentMark::print_taskqueue_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ShenandoahConcurrentMark::print_taskqueue_stats(outputStream* const st) const {
  print_taskqueue_stats_hdr(st);
  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
  TaskQueueStats totals;
  const int n = sh->max_conc_workers();
  for (int i = 0; i < n; ++i) {
    st->print(INT32_FORMAT_W(3), i);
    _task_queues->queue(i)->stats.print(st);
    st->print("\n");
    totals += _task_queues->queue(i)->stats;
  }
  st->print_raw("tot "); totals.print(st); st->cr();
  DEBUG_ONLY(totals.verify());
}

void ShenandoahConcurrentMark::print_push_only_taskqueue_stats(outputStream* const st) const {
  print_taskqueue_stats_hdr(st);
  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
  TaskQueueStats totals;
  const int n = sh->max_conc_workers();
  for (int i = 0; i < n; ++i) {
    st->print(INT32_FORMAT_W(3), i);
    _task_queues->queue(i)->stats.print(st);
    st->print("\n");
    totals += _task_queues->queue(i)->stats;
  }
  st->print_raw("tot "); totals.print(st); st->cr();
}

void ShenandoahConcurrentMark::reset_taskqueue_stats() {
  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
  const int n = sh->max_conc_workers();
  for (int i = 0; i < n; ++i) {
    _task_queues->queue(i)->stats.reset();
  }
}
#endif // TASKQUEUE_STATS

// Weak Reference Closures
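// Completes marking for reference processing: drains this worker's queue,
// SATB buffers, and work stolen from other queues until nothing is left.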
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
  ShenandoahHeap* _sh;
  ShenandoahConcurrentMark* _scm;
  uint _worker_id;
  int _seed;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id): _worker_id(worker_id), _seed(17) {
    _sh = (ShenandoahHeap*) Universe::heap();
    _scm = _sh->concurrentMark();
  }

  void do_void() {
    SCMObjToScanQueue* q = _scm->get_queue(_worker_id);
    ShenandoahMarkObjsClosure cl(q, _sh->need_update_refs());
    while (true) {
      if (!_scm->try_queue(q, &cl) &&
          !_scm->try_draining_an_satb_buffer(_worker_id) &&
          !_scm->try_to_steal(_worker_id, &cl, &_seed)) {
        break;
      }
    }
  }
};

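// Keep-alive closure used during reference processing: resolves (and, if
// needed, updates) the given reference slot, marks the object and pushes it
// onto the queue for scanning, counting the references it keeps alive.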
class ShenandoahCMKeepAliveAndDrainClosure: public OopClosure {
  SCMObjToScanQueue* _queue;
  ShenandoahHeap* _sh;
  ShenandoahConcurrentMark* _scm;

  size_t _ref_count;

public:
  ShenandoahCMKeepAliveAndDrainClosure(SCMObjToScanQueue* q) :
    _queue(q) {
    _sh = (ShenandoahHeap*) Universe::heap();
    _scm = _sh->concurrentMark();
    _ref_count = 0;
  }

  virtual void do_oop(oop* p) { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) {
    assert(false, "narrowOops are not implemented");
  }

  void do_oop_work(oop* p) {
    oop obj;
    if (_sh->need_update_refs()) {
      obj = _sh->maybe_update_oop_ref(p);
    } else {
      obj = oopDesc::load_heap_oop(p);
    }

    assert(obj == oopDesc::bs()->read_barrier(obj), "only get updated oops in weak ref processing");

    if (obj != NULL) {
      if (Verbose && ShenandoahTraceWeakReferences) {
        gclog_or_tty->print_cr("\twe're looking at location "
                               "*" PTR_FORMAT " = " PTR_FORMAT,
                               p2i(p), p2i((void*) obj));
        obj->print();
      }
      if (_sh->mark_current(obj)) {
        bool pushed = _queue->push(obj);
        assert(pushed, "overflow queue should always succeed pushing");
      }

      _ref_count++;
    }
  }

  size_t ref_count() { return _ref_count; }
};

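// Gang task proxies that let the reference processor run its process and
// enqueue work items on Shenandoah's concurrent worker gang.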
class ShenandoahRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;

public:
  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahIsAliveClosure is_alive;
    ShenandoahCMKeepAliveAndDrainClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id);
    _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
  }
};

class ShenandoahRefEnqueueTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::EnqueueTask& _enqueue_task;

public:
  ShenandoahRefEnqueueTaskProxy(AbstractRefProcTaskExecutor::EnqueueTask& enqueue_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enqueue_task(enqueue_task) {
  }

  void work(uint worker_id) {
    _enqueue_task.work(worker_id);
  }
};

class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahRefProcTaskExecutor() : _workers(ShenandoahHeap::heap()->conc_workers()) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task) {
    ShenandoahRefProcTaskProxy proc_task_proxy(task);
    _workers->run_task(&proc_task_proxy);
  }

  void execute(EnqueueTask& task) {
    ShenandoahRefEnqueueTaskProxy enqueue_task_proxy(task);
    _workers->run_task(&enqueue_task_proxy);
  }
};

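// Process discovered (weak) references after marking is complete, enqueue the
// cleared references, and, if requested, unload classes and clean the string,
// symbol, and system dictionary tables.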
void ShenandoahConcurrentMark::weak_refs_work() {
  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
  ReferenceProcessor* rp = sh->ref_processor();

  // Setup collector policy for softref cleaning.
  bool clear_soft_refs = sh->collector_policy()->use_should_clear_all_soft_refs(true /* bogus arg */);
  if (ShenandoahTraceWeakReferences) {
    tty->print_cr("clearing soft refs: %s", BOOL_TO_STR(clear_soft_refs));
  }
  rp->setup_policy(clear_soft_refs);

  uint serial_worker_id = 0;
  ShenandoahIsAliveClosure is_alive;
  ShenandoahCMKeepAliveAndDrainClosure keep_alive(get_queue(serial_worker_id));
  ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id);
  ShenandoahRefProcTaskExecutor par_task_executor;
  bool processing_is_mt = true;
  AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);

  if (ShenandoahTraceWeakReferences) {
    gclog_or_tty->print_cr("start processing references");
  }

  rp->process_discovered_references(&is_alive, &keep_alive,
                                    &complete_gc, &par_task_executor,
                                    NULL,
                                    ShenandoahHeap::heap()->tracer()->gc_id());

  if (ShenandoahTraceWeakReferences) {
    gclog_or_tty->print_cr("finished processing references, processed " SIZE_FORMAT " refs", keep_alive.ref_count());
    gclog_or_tty->print_cr("start enqueuing references");
  }

  rp->enqueue_discovered_references(executor);

  if (ShenandoahTraceWeakReferences) {
    gclog_or_tty->print_cr("finished enqueuing references");
  }

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");

  if (ClassUnloadingWithConcurrentMark) {
    // Unload classes and purge SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(&is_alive);
    // Unload nmethods.
    CodeCache::do_unloading(&is_alive, purged_class);
    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(&is_alive);
    // Delete entries from dead interned strings.
    StringTable::unlink(&is_alive);
    // Clean up unreferenced symbols in symbol table.
    SymbolTable::unlink();

    ClassLoaderDataGraph::purge();
  }
}

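// Cancel concurrent marking: abandon reference discovery, clear the marking
// queues, and drop any partially filled SATB buffers.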
void ShenandoahConcurrentMark::cancel() {
  ShenandoahHeap* sh = ShenandoahHeap::heap();

  // Cancel weak-ref discovery.
  if (ShenandoahProcessReferences) {
    ReferenceProcessor* rp = sh->ref_processor();
    rp->abandon_partial_discovery();
    rp->disable_discovery();
  }

  // Clean up marking stacks.
  SCMObjToScanQueueSet* queues = task_queues();
  for (uint i = 0; i < _max_conc_worker_id; ++i) {
    SCMObjToScanQueue* task_queue = queues->queue(i);
    task_queue->set_empty();
    task_queue->overflow_stack()->clear();
  }

  // Cancel SATB buffers.
  JavaThread::satb_mark_queue_set().abandon_partial_marking();
}
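
// Map a worker id to its scan queue, wrapping around when the id exceeds the
// number of queues.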
SCMObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  worker_id = worker_id % _max_conc_worker_id;
  return _task_queues->queue(worker_id);
}