/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/stringTable.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "code/codeCache.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/oop.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"

// Marks objects and adds them to the queue to be scanned. Per-region
// liveness data is accumulated here and merged back in the destructor.
ShenandoahMarkObjsClosure::ShenandoahMarkObjsClosure(SCMObjToScanQueue* q, bool update_refs) :
  _heap((ShenandoahHeap*)(Universe::heap())),
  _mark_refs(ShenandoahMarkRefsClosure(q, update_refs)),
  _live_data(NEW_C_HEAP_ARRAY(size_t, _heap->max_regions(), mtGC))
{
  Copy::zero_to_bytes(_live_data, _heap->max_regions() * sizeof(size_t));
}

ShenandoahMarkObjsClosure::~ShenandoahMarkObjsClosure() {
  // Merge liveness data back into actual regions.

  // We need to lock the heap here to avoid racing with heap expansion.
  MutexLockerEx ml(ShenandoahHeap_lock, true);
  ShenandoahHeapRegion** regions = _heap->heap_regions();
  for (uint i = 0; i < _heap->num_regions(); i++) {
    regions[i]->increase_live_data(_live_data[i]);
  }
  FREE_C_HEAP_ARRAY(size_t, _live_data);
}

ShenandoahMarkRefsClosure::ShenandoahMarkRefsClosure(SCMObjToScanQueue* q, bool update_refs) :
  MetadataAwareOopClosure(((ShenandoahHeap *) Universe::heap())->ref_processor()),
  _queue(q),
  _heap((ShenandoahHeap*) Universe::heap()),
  _scm(_heap->concurrentMark()),
  _update_refs(update_refs)
{
}

void ShenandoahMarkRefsClosure::do_oop(narrowOop* p) {
  Unimplemented();
}


// Applies the given CLD closure only to class loader data that is
// still alive, as determined by ShenandoahIsAliveClosure.
class CLDMarkAliveClosure : public CLDClosure {
private:
  CLDClosure* _cl;
public:
  CLDMarkAliveClosure(CLDClosure* cl) : _cl(cl) {
  }
  void do_cld(ClassLoaderData* cld) {
    ShenandoahIsAliveClosure is_alive;
    if (cld->is_alive(&is_alive)) {
      _cl->do_cld(cld);
    }
  }
};

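// Gang task that scans the GC roots in parallel and pushes the objects they
// reference onto the per-worker mark queues; when _update_refs is set, the
// root references are also updated in place.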
class ShenandoahMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  bool _update_refs;
public:
  ShenandoahMarkRootsTask(ShenandoahRootProcessor* rp, bool update_refs) :
    AbstractGangTask("Shenandoah update roots task"), _rp(rp),
    _update_refs(update_refs) {
  }

  void work(uint worker_id) {
    // tty->print_cr("start mark roots worker: "INT32_FORMAT, worker_id);
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    SCMObjToScanQueue* q = heap->concurrentMark()->get_queue(worker_id);
    ShenandoahMarkRefsClosure cl(q, _update_refs);

    CodeBlobToOopClosure blobsCl(&cl, true);
    CLDToOopClosure cldCl(&cl);

    ResourceMark m;
    if (ShenandoahProcessReferences && ClassUnloadingWithConcurrentMark) {
      _rp->process_strong_roots(&cl, &cldCl, &blobsCl);
    } else {
      _rp->process_all_roots(&cl, &cldCl, &blobsCl);
    }
    // tty->print_cr("finish mark roots worker: "INT32_FORMAT, worker_id);
  }
};

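// Gang task that performs the marking proper: each worker repeatedly drains
// its own mark queue, drains SATB buffers and steals work from other queues
// until termination is offered or the GC is cancelled.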
class SCMConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ParallelTaskTerminator* _terminator;
  int _seed;
  bool _update_refs;

public:
  SCMConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs) :
    AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator), _seed(17), _update_refs(update_refs) {
  }


  void work(uint worker_id) {

    SCMObjToScanQueue* q = _cm->get_queue(worker_id);
    ShenandoahMarkObjsClosure cl(q, _update_refs);
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (true) {
      if (heap->cancelled_concgc() ||
          (!_cm->try_queue(q, &cl) &&
           !_cm->try_draining_an_satb_buffer(worker_id) &&
           !_cm->try_to_steal(worker_id, &cl, &_seed))
          ) {
        if (_terminator->offer_termination()) break;
      }
    }
    if (ShenandoahTracePhases && heap->cancelled_concgc()) {
      tty->print_cr("Cancelled concurrent marking");
    }
  }
};

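// Scan the GC roots again and push the objects they reference onto the mark
// queues; when references are being updated, keep the derived pointer table
// consistent around the scan.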
void ShenandoahConcurrentMark::prepare_unmarked_root_objs() {

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  bool update_refs = heap->need_update_refs();

  if (update_refs) {
    COMPILER2_PRESENT(DerivedPointerTable::clear());
  }

  prepare_unmarked_root_objs_no_derived_ptrs(update_refs);

  if (update_refs) {
    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  }

}

void ShenandoahConcurrentMark::prepare_unmarked_root_objs_no_derived_ptrs(bool update_refs) {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (ShenandoahParallelRootScan) {

    ClassLoaderDataGraph::clear_claimed_marks();
    heap->conc_workers()->set_active_workers(_max_conc_worker_id);
    ShenandoahRootProcessor root_proc(heap, _max_conc_worker_id);
    TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
    ShenandoahMarkRootsTask mark_roots(&root_proc, update_refs);
    heap->conc_workers()->run_task(&mark_roots);

    // Mark through any class loaders that have been found alive.
    ShenandoahMarkRefsClosure cl(get_queue(0), update_refs);
    CLDToOopClosure cldCl(&cl);
    CLDMarkAliveClosure cld_keep_alive(&cldCl);
    ClassLoaderDataGraph::roots_cld_do(NULL, &cld_keep_alive);

  } else {
    ShenandoahMarkRefsClosure cl(get_queue(0), update_refs);
    heap->roots_iterate(&cl);
  }

  if (!(ShenandoahProcessReferences && ClassUnloadingWithConcurrentMark)) {
    ShenandoahMarkRefsClosure cl(get_queue(0), update_refs);
    heap->weak_roots_iterate(&cl);
  }

  // tty->print_cr("all root marker threads done");
}


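// Create one overflow-capable task queue per concurrent GC worker, register
// it with the queue set, and size the SATB buffers.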
void ShenandoahConcurrentMark::initialize() {
  _max_conc_worker_id = MAX2((uint) ConcGCThreads, 1U);
  _task_queues = new SCMObjToScanQueueSet((int) _max_conc_worker_id);

  for (uint i = 0; i < _max_conc_worker_id; ++i) {
    SCMObjToScanQueue* task_queue = new SCMObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }
  JavaThread::satb_mark_queue_set().set_buffer_size(1024 /* G1SATBBufferSize */);
}

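// Concurrent marking phase: enable weak reference discovery if requested,
// then run the marking task on the concurrent workers.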
void ShenandoahConcurrentMark::mark_from_roots() {
  if (ShenandoahGCVerbose) {
    tty->print_cr("STOPPING THE WORLD: before marking");
    tty->print_cr("Starting markFromRoots");
  }

  ShenandoahHeap* sh = (ShenandoahHeap *) Universe::heap();

  bool update_refs = sh->need_update_refs();

  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::conc_mark);
  ParallelTaskTerminator terminator(_max_conc_worker_id, _task_queues);

  if (ShenandoahProcessReferences) {
    ReferenceProcessor* rp = sh->ref_processor();
    // enable ("weak") refs discovery
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
  }

  SCMConcurrentMarkingTask markingTask = SCMConcurrentMarkingTask(this, &terminator, update_refs);
  sh->conc_workers()->set_active_workers(_max_conc_worker_id);
  sh->conc_workers()->run_task(&markingTask);

  if (ShenandoahGCVerbose) {
    tty->print("total workers = %u active workers = %u\n",
               sh->conc_workers()->total_workers(),
               sh->conc_workers()->active_workers());
    TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
    TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
  }

  if (ShenandoahGCVerbose) {
    tty->print_cr("Finishing markFromRoots");
    tty->print_cr("RESUMING THE WORLD: after marking");
  }

  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::conc_mark);
}

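// Gang task used during final marking to flush and drain the remaining SATB
// buffers in parallel.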
class FinishDrainSATBBuffersTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ParallelTaskTerminator* _terminator;
public:
  FinishDrainSATBBuffersTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator) :
    AbstractGangTask("Finish draining SATB buffers"), _cm(cm), _terminator(terminator) {
  }

  void work(uint worker_id) {
    _cm->drain_satb_buffers(worker_id, true);
  }
};

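// Updates references in place without marking through them; used to fix up
// alive refs in JNI weak handles after marking.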
class ShenandoahUpdateAliveRefs : public OopClosure {
private:
  ShenandoahHeap* _heap;
public:
  ShenandoahUpdateAliveRefs() : _heap(ShenandoahHeap::heap()) {
  }
  virtual void do_oop(oop* p) {
    _heap->maybe_update_oop_ref(p);
  }

  virtual void do_oop(narrowOop* p) {
    Unimplemented();
  }
};

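// Finish marking: re-scan the roots, drain the SATB buffers and the mark
// queues, then process weak references and, if enabled, unload classes.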
void ShenandoahConcurrentMark::finish_mark_from_roots() {
  if (ShenandoahGCVerbose) {
    tty->print_cr("Starting finishMarkFromRoots");
  }

  IsGCActiveMark is_active;

  ShenandoahHeap* sh = (ShenandoahHeap *) Universe::heap();

  // Trace any (new) unmarked root references.
  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::rescan_roots);
  prepare_unmarked_root_objs();
  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::rescan_roots);
  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::drain_satb);
  {
    StrongRootsScope scope(_max_conc_worker_id);
    ParallelTaskTerminator terminator(_max_conc_worker_id, _task_queues);
    // drain_satb_buffers(0, true);
    FinishDrainSATBBuffersTask drain_satb_buffers(this, &terminator);
    sh->conc_workers()->set_active_workers(_max_conc_worker_id);
    sh->conc_workers()->run_task(&drain_satb_buffers);
    sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::drain_satb);
  }

  // Finally mark everything else we've got in our queues during the previous steps.
  {
    sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::drain_queues);
    ParallelTaskTerminator terminator(_max_conc_worker_id, _task_queues);
    SCMConcurrentMarkingTask markingTask = SCMConcurrentMarkingTask(this, &terminator, sh->need_update_refs());
    sh->conc_workers()->set_active_workers(_max_conc_worker_id);
    sh->conc_workers()->run_task(&markingTask);
    sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::drain_queues);
  }

#ifdef ASSERT
  for (int i = 0; i < (int) _max_conc_worker_id; i++) {
    assert(_task_queues->queue(i)->is_empty(), "Should be empty");
  }
#endif

  // When we're done marking everything, we process weak references.
  if (ShenandoahProcessReferences) {
    sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::weakrefs);
    weak_refs_work();
    sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::weakrefs);
  }

#ifdef ASSERT
  for (int i = 0; i < (int) _max_conc_worker_id; i++) {
    assert(_task_queues->queue(i)->is_empty(), "Should be empty");
  }
#endif

  if (ShenandoahGCVerbose) {
    tty->print_cr("Finishing finishMarkFromRoots");
#ifdef SLOWDEBUG
    for (int i = 0; i <(int)_max_conc_worker_id; i++) {
      tty->print("Queue: "INT32_FORMAT":", i);
      _task_queues->queue(i)->stats.print(tty, 10);
      tty->cr();
      _task_queues->queue(i)->stats.verify();
    }
#endif
  }

  // We still need to update (without marking) alive refs in JNI handles.
  if (ShenandoahProcessReferences && ClassUnloadingWithConcurrentMark) {
    ShenandoahUpdateAliveRefs cl;
    ShenandoahIsAliveClosure is_alive;
    JNIHandles::weak_oops_do(&is_alive, &cl);
  }

#ifdef ASSERT
  verify_roots();

  if (ShenandoahDumpHeapAfterConcurrentMark) {
    sh->ensure_parsability(false);
    sh->print_all_refs("post-mark");
  }
#endif
}

#ifdef ASSERT
void ShenandoahVerifyRootsClosure1::do_oop(oop* p) {
  oop obj = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(obj)) {
    guarantee(ShenandoahHeap::heap()->is_marked_current(obj), "oop must be marked");
    guarantee(obj == ShenandoahBarrierSet::resolve_oop_static_not_null(obj), "oop must not be forwarded");
  }
}

void ShenandoahConcurrentMark::verify_roots() {
  ShenandoahVerifyRootsClosure1 cl;
  CodeBlobToOopClosure blobsCl(&cl, true);
  CLDToOopClosure cldCl(&cl);
  ClassLoaderDataGraph::clear_claimed_marks();
  ShenandoahRootProcessor rp(ShenandoahHeap::heap(), 1);
  rp.process_roots(&cl, &cl, &cldCl, &cldCl, &cldCl, &blobsCl);
}
#endif

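// Pushes the contents of a SATB buffer onto a mark queue, resolving
// forwarded oops first.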
class ShenandoahSATBBufferClosure : public SATBBufferClosure {
private:
  SCMObjToScanQueue* _queue;

public:
  ShenandoahSATBBufferClosure(SCMObjToScanQueue* q) :
    _queue(q)
  {
  }

  void do_buffer(void** buffer, size_t size) {
    // tty->print_cr("draining one satb buffer");
    for (size_t i = 0; i < size; ++i) {
      void* entry = buffer[i];
      oop obj = oop(entry);
      // tty->print_cr("satb buffer entry: "PTR_FORMAT, p2i((HeapWord*) obj));
      if (!oopDesc::is_null(obj)) {
        obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        bool pushed = _queue->push(obj);
        assert(pushed, "overflow queue should always succeed pushing");
      }
    }
  }
};

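// Claims each thread once and flushes its active SATB queue (the shared
// queue in the case of the VM thread) through the given buffer closure.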
class ShenandoahSATBThreadsClosure : public ThreadClosure {
  ShenandoahSATBBufferClosure* _satb_cl;
  int _thread_parity;

 public:
  ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
    _satb_cl(satb_cl),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;
        jt->satb_mark_queue().apply_closure_and_empty(_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
      }
    }
  }
};

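// Apply the SATB buffer closure to all completed SATB buffers; at remark
// time, also flush the per-thread and shared queues.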
void ShenandoahConcurrentMark::drain_satb_buffers(uint worker_id, bool remark) {

  // tty->print_cr("start draining SATB buffers");

  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
  SCMObjToScanQueue* q = get_queue(worker_id);
  ShenandoahSATBBufferClosure cl(q);

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  while (satb_mq_set.apply_closure_to_completed_buffer(&cl));

  if (remark) {
    ShenandoahSATBThreadsClosure tc(&cl);
    Threads::threads_do(&tc);
  }

  // tty->print_cr("end draining SATB buffers");

}

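// Drain a single completed SATB buffer into this worker's mark queue;
// returns false if no completed buffer was available.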
bool ShenandoahConcurrentMark::drain_one_satb_buffer(uint worker_id) {

  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
  SCMObjToScanQueue* q = get_queue(worker_id);
  ShenandoahSATBBufferClosure cl(q);

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  bool result = satb_mq_set.apply_closure_to_completed_buffer(&cl);
  return result;
}

#if TASKQUEUE_STATS
void ShenandoahConcurrentMark::print_taskqueue_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ShenandoahConcurrentMark::print_taskqueue_stats(outputStream* const st) const {
  print_taskqueue_stats_hdr(st);
  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
  TaskQueueStats totals;
  const int n = sh->max_conc_workers();
  for (int i = 0; i < n; ++i) {
    st->print(INT32_FORMAT_W(3), i);
    _task_queues->queue(i)->stats.print(st);
    st->print("\n");
    totals += _task_queues->queue(i)->stats;
  }
  st->print_raw("tot "); totals.print(st); st->cr();
  DEBUG_ONLY(totals.verify());

}

void ShenandoahConcurrentMark::print_push_only_taskqueue_stats(outputStream* const st) const {
  print_taskqueue_stats_hdr(st);
  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
  TaskQueueStats totals;
  const int n = sh->max_conc_workers();
  for (int i = 0; i < n; ++i) {
    st->print(INT32_FORMAT_W(3), i);
    _task_queues->queue(i)->stats.print(st);
    st->print("\n");
    totals += _task_queues->queue(i)->stats;
  }
  st->print_raw("tot "); totals.print(st); st->cr();

  DEBUG_ONLY(totals.verify_only_pushes());
}

void ShenandoahConcurrentMark::reset_taskqueue_stats() {
  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
  const int n = sh->max_conc_workers();
  for (int i = 0; i < n; ++i) {
    _task_queues->queue(i)->stats.reset();
  }
}
#endif // TASKQUEUE_STATS

// Weak Reference Closures
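// Serves as the complete_gc closure during reference processing: drains this
// worker's mark queue, SATB buffers and any stealable work until no work
// remains.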
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
  ShenandoahHeap* _sh;
  ShenandoahConcurrentMark* _scm;
  uint _worker_id;
  int _seed;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id): _worker_id(worker_id), _seed(17) {
    _sh = (ShenandoahHeap*) Universe::heap();
    _scm = _sh->concurrentMark();
  }


  void do_void() {

    SCMObjToScanQueue* q = _scm->get_queue(_worker_id);
    ShenandoahMarkObjsClosure cl(q, _sh->need_update_refs());
    while (true) {
      if (!_scm->try_queue(q, &cl) &&
          !_scm->try_draining_an_satb_buffer(_worker_id) &&
          !_scm->try_to_steal(_worker_id, &cl, &_seed)) {
        break;
      }
    }
  }
};


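// Serves as the keep_alive closure during reference processing: pushes each
// live referent onto the mark queue, updating the reference first when the
// heap needs reference updates.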
class ShenandoahCMKeepAliveAndDrainClosure: public OopClosure {
  SCMObjToScanQueue* _queue;
  ShenandoahHeap* _sh;
  ShenandoahConcurrentMark* _scm;

  size_t _ref_count;

public:
  ShenandoahCMKeepAliveAndDrainClosure(SCMObjToScanQueue* q) :
    _queue(q) {
    _sh = (ShenandoahHeap*) Universe::heap();
    _scm = _sh->concurrentMark();
    _ref_count = 0;
  }

  virtual void do_oop(oop* p){ do_oop_work(p);}
  virtual void do_oop(narrowOop* p) {
    assert(false, "narrowOops are not implemented");
  }


  void do_oop_work(oop* p) {

    oop obj;
    if (_sh->need_update_refs()) {
      obj = _sh->maybe_update_oop_ref(p);
    } else {
      obj = oopDesc::load_heap_oop(p);
    }

    assert(obj == oopDesc::bs()->resolve_oop(obj), "only get updated oops in weak ref processing");

    if (obj != NULL) {
      if (Verbose && ShenandoahTraceWeakReferences) {
        gclog_or_tty->print_cr("\twe're looking at location "
                               "*"PTR_FORMAT" = "PTR_FORMAT,
                               p2i(p), p2i((void*) obj));
        obj->print();
      }
      bool pushed = _queue->push(obj);
      assert(pushed, "overflow queue should always succeed pushing");

      _ref_count++;
    }
  }

  size_t ref_count() { return _ref_count; }

};

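// Adapters that run the reference processor's parallel process and enqueue
// tasks on the concurrent worker gang.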
class ShenandoahRefProcTaskProxy : public AbstractGangTask {

private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;

public:

  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahIsAliveClosure is_alive;
    ShenandoahCMKeepAliveAndDrainClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id);
    _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
  }
};

class ShenandoahRefEnqueueTaskProxy : public AbstractGangTask {

private:
  AbstractRefProcTaskExecutor::EnqueueTask& _enqueue_task;

public:

  ShenandoahRefEnqueueTaskProxy(AbstractRefProcTaskExecutor::EnqueueTask& enqueue_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enqueue_task(enqueue_task) {
  }

  void work(uint worker_id) {
    _enqueue_task.work(worker_id);
  }
};

class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {

private:
  WorkGang* _workers;

public:

  ShenandoahRefProcTaskExecutor() : _workers(ShenandoahHeap::heap()->conc_workers()) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task) {
    ShenandoahRefProcTaskProxy proc_task_proxy(task);
    _workers->run_task(&proc_task_proxy);
  }

  void execute(EnqueueTask& task) {
    ShenandoahRefEnqueueTaskProxy enqueue_task_proxy(task);
    _workers->run_task(&enqueue_task_proxy);
  }
};


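// Process and enqueue the discovered weak references; when class unloading
// with concurrent mark is enabled, also unload classes and nmethods and
// clean up the string and symbol tables.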
void ShenandoahConcurrentMark::weak_refs_work() {
  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
  ReferenceProcessor* rp = sh->ref_processor();

  // Setup collector policy for softref cleaning.
  bool clear_soft_refs = sh->collector_policy()->use_should_clear_all_soft_refs(true /* bogus arg */);
  if (ShenandoahTraceWeakReferences) {
    tty->print_cr("clearing soft refs: %s", BOOL_TO_STR(clear_soft_refs));
  }
  rp->setup_policy(clear_soft_refs);

  uint serial_worker_id = 0;
  ShenandoahIsAliveClosure is_alive;
  ShenandoahCMKeepAliveAndDrainClosure keep_alive(get_queue(serial_worker_id));
  ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id);
  ShenandoahRefProcTaskExecutor par_task_executor;
  bool processing_is_mt = true;
  AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);

  if (ShenandoahTraceWeakReferences) {
    gclog_or_tty->print_cr("start processing references");
  }

  rp->process_discovered_references(&is_alive, &keep_alive,
                                    &complete_gc, &par_task_executor,
                                    NULL,
                                    ShenandoahHeap::heap()->tracer()->gc_id());

  if (ShenandoahTraceWeakReferences) {
    gclog_or_tty->print_cr("finished processing references, processed "SIZE_FORMAT" refs", keep_alive.ref_count());
    gclog_or_tty->print_cr("start enqueuing references");
  }

  rp->enqueue_discovered_references(executor);

  if (ShenandoahTraceWeakReferences) {
    gclog_or_tty->print_cr("finished enqueueing references");
  }

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");

  if (ClassUnloadingWithConcurrentMark) {
    // Unload classes and purge SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(&is_alive);
    // Unload nmethods.
    CodeCache::do_unloading(&is_alive, purged_class);
    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(&is_alive);
    // Delete entries from dead interned strings.
    StringTable::unlink(&is_alive);
    // Clean up unreferenced symbols in symbol table.
    SymbolTable::unlink();

    ClassLoaderDataGraph::purge();
  }
}

void ShenandoahConcurrentMark::cancel() {
  ShenandoahHeap* sh = ShenandoahHeap::heap();

  // Cancel weak-ref discovery.
  if (ShenandoahProcessReferences) {
    ReferenceProcessor* rp = sh->ref_processor();
    rp->abandon_partial_discovery();
    rp->disable_discovery();
  }

  // Clean up marking stacks.
  SCMObjToScanQueueSet* queues = task_queues();
  for (uint i = 0; i < _max_conc_worker_id; ++i) {
    SCMObjToScanQueue* task_queue = queues->queue(i);
    task_queue->set_empty();
    task_queue->overflow_stack()->clear();
  }

  // Cancel SATB buffers.
  JavaThread::satb_mark_queue_set().abandon_partial_marking();
}

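// Map an arbitrary worker id onto one of the available task queues.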
SCMObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  worker_id = worker_id % _max_conc_worker_id;
  return _task_queues->queue(worker_id);
}