/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zMarkCache.inline.hpp"
#include "gc/z/zMarkStack.inline.hpp"
#include "gc/z/zMarkTerminate.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.inline.hpp"
#include "gc/z/zThreadLocalAllocBuffer.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handshake.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/ticks.hpp"

static const ZStatSubPhase ZSubPhaseConcurrentMark("Concurrent Mark");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryFlush("Concurrent Mark Try Flush");
static const ZStatSubPhase ZSubPhaseConcurrentMarkIdle("Concurrent Mark Idle");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryTerminate("Concurrent Mark Try Terminate");
static const ZStatSubPhase ZSubPhaseMarkTryComplete("Pause Mark Try Complete");

ZMark::ZMark(ZWorkers* workers, ZPageTable* page_table) :
    _workers(workers),
    _page_table(page_table),
    _allocator(),
    _stripes(),
    _terminate(),
    _work_terminateflush(true),
    _work_nproactiveflush(0),
    _work_nterminateflush(0),
    _nproactiveflush(0),
    _nterminateflush(0),
    _ntrycomplete(0),
    _ncontinue(0),
    _nworkers(0) {}

bool ZMark::is_initialized() const {
  return _allocator.is_initialized();
}

size_t ZMark::calculate_nstripes(uint nworkers) const {
  // Calculate the number of stripes to use from the number of workers,
  // where the number of stripes must be a power of two and we want to
  // have at least one worker per stripe.
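  // For example, 6 workers map to 4 stripes (rounded down to a power
  // of two), and the result is capped at ZMarkStripesMax.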
  const size_t nstripes = round_down_power_of_2(nworkers);
  return MIN2(nstripes, ZMarkStripesMax);
}

void ZMark::prepare_mark() {
  // Increment global sequence number to invalidate
  // marking information for all pages.
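  // Pages compare their local sequence number against the global one
  // and treat marking information from a previous cycle as stale.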
  ZGlobalSeqNum++;

  // Reset flush/continue counters
  _nproactiveflush = 0;
  _nterminateflush = 0;
  _ntrycomplete = 0;
  _ncontinue = 0;

  // Set number of workers to use
  _nworkers = _workers->nconcurrent();

  // Set number of mark stripes to use, based on number
  // of workers we will use in the concurrent mark phase.
  const size_t nstripes = calculate_nstripes(_nworkers);
  _stripes.set_nstripes(nstripes);

  // Update statistics
  ZStatMark::set_at_mark_start(nstripes);

  // Print worker/stripe distribution
  LogTarget(Debug, gc, marking) log;
  if (log.is_enabled()) {
    log.print("Mark Worker/Stripe Distribution");
    for (uint worker_id = 0; worker_id < _nworkers; worker_id++) {
      const ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, worker_id);
      const size_t stripe_id = _stripes.stripe_id(stripe);
      log.print("  Worker %u(%u) -> Stripe " SIZE_FORMAT "(" SIZE_FORMAT ")",
                worker_id, _nworkers, stripe_id, nstripes);
    }
  }
}

class ZMarkRootsIteratorClosure : public ZRootsIteratorClosure {
public:
  ZMarkRootsIteratorClosure() {
    ZThreadLocalAllocBuffer::reset_statistics();
  }

  ~ZMarkRootsIteratorClosure() {
    ZThreadLocalAllocBuffer::publish_statistics();
  }

  virtual void do_thread(Thread* thread) {
    // Update thread local address bad mask
    ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask);

    // Mark invisible root
    ZThreadLocalData::do_invisible_root(thread, ZBarrier::mark_barrier_on_invisible_root_oop_field);

    // Retire TLAB
    ZThreadLocalAllocBuffer::retire(thread);
  }

  virtual bool should_disarm_nmethods() const {
    return true;
  }

  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_root_oop_field(p);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZMarkRootsTask : public ZTask {
private:
  ZMark* const              _mark;
  ZRootsIterator            _roots;
  ZMarkRootsIteratorClosure _cl;

public:
  ZMarkRootsTask(ZMark* mark) :
      ZTask("ZMarkRootsTask"),
      _mark(mark),
      _roots(false /* visit_jvmti_weak_export */) {}

  virtual void work() {
    _roots.oops_do(&_cl);

    // Flush and free worker stacks. Needed here since
    // the set of workers executing during root scanning
    // can be different from the set of workers executing
    // during mark.
    _mark->flush_and_free();
  }
};

void ZMark::start() {
  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Prepare for concurrent mark
  prepare_mark();

  // Mark roots
  ZMarkRootsTask task(this);
  _workers->run_parallel(&task);
}

void ZMark::prepare_work() {
  assert(_nworkers == _workers->nconcurrent(), "Invalid number of workers");

  // Set number of active workers
  _terminate.reset(_nworkers);

  // Reset flush counters
  _work_nproactiveflush = _work_nterminateflush = 0;
  _work_terminateflush = true;
}

void ZMark::finish_work() {
  // Accumulate proactive/terminate flush counters
  _nproactiveflush += _work_nproactiveflush;
  _nterminateflush += _work_nterminateflush;
}

bool ZMark::is_array(uintptr_t addr) const {
  return ZOop::from_address(addr)->is_objArray();
}

void ZMark::push_partial_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(is_aligned(addr, ZMarkPartialArrayMinSize), "Address misaligned");
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());
  ZMarkStripe* const stripe = _stripes.stripe_for_addr(addr);
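  // Encode the entry with the offset expressed in units of
  // ZMarkPartialArrayMinSize (hence the alignment assert above)
  // and the length expressed in oops.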
  const uintptr_t offset = ZAddress::offset(addr) >> ZMarkPartialArrayMinSizeShift;
  const uintptr_t length = size / oopSize;
  const ZMarkStackEntry entry(offset, length, finalizable);

  log_develop_trace(gc, marking)("Array push partial: " PTR_FORMAT " (" SIZE_FORMAT "), stripe: " SIZE_FORMAT,
                                 addr, size, _stripes.stripe_id(stripe));

  stacks->push(&_allocator, &_stripes, stripe, entry, false /* publish */);
}

void ZMark::follow_small_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= ZMarkPartialArrayMinSize, "Too large, should be split");
  const size_t length = size / oopSize;

  log_develop_trace(gc, marking)("Array follow small: " PTR_FORMAT " (" SIZE_FORMAT ")", addr, size);

  ZBarrier::mark_barrier_on_oop_array((oop*)addr, length, finalizable);
}

void ZMark::follow_large_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= (size_t)arrayOopDesc::max_array_length(T_OBJECT) * oopSize, "Too large");
  assert(size > ZMarkPartialArrayMinSize, "Too small, should not be split");
  const uintptr_t start = addr;
  const uintptr_t end = start + size;

  // Calculate the aligned middle start/end/size, where the middle start
  // should always be greater than the start (hence the +1 below) to make
  // sure we always do some follow work, not just split the array into pieces.
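  // For example, assuming a 4K ZMarkPartialArrayMinSize, an array
  // spanning [0x100, 0x5100) gets middle_start 0x1000 and middle_end
  // 0x5000; [0x100, 0x1000) is followed directly, while the middle and
  // trailing parts are pushed as partial array entries.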
  const uintptr_t middle_start = align_up(start + 1, ZMarkPartialArrayMinSize);
  const size_t    middle_size = align_down(end - middle_start, ZMarkPartialArrayMinSize);
  const uintptr_t middle_end = middle_start + middle_size;

  log_develop_trace(gc, marking)("Array follow large: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT "), "
                                 "middle: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT ")",
                                 start, end, size, middle_start, middle_end, middle_size);

  // Push unaligned trailing part
  if (end > middle_end) {
    const uintptr_t trailing_addr = middle_end;
    const size_t trailing_size = end - middle_end;
    push_partial_array(trailing_addr, trailing_size, finalizable);
  }

  // Push aligned middle part(s)
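  // Split the middle in halves, pushing the upper half in each
  // iteration. Each pushed part is re-split when popped, if it is
  // still larger than ZMarkPartialArrayMinSize.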
  uintptr_t partial_addr = middle_end;
  while (partial_addr > middle_start) {
    const size_t parts = 2;
    const size_t partial_size = align_up((partial_addr - middle_start) / parts, ZMarkPartialArrayMinSize);
    partial_addr -= partial_size;
    push_partial_array(partial_addr, partial_size, finalizable);
  }

  // Follow leading part
  assert(start < middle_start, "Miscalculated middle start");
  const uintptr_t leading_addr = start;
  const size_t leading_size = middle_start - start;
  follow_small_array(leading_addr, leading_size, finalizable);
}

void ZMark::follow_array(uintptr_t addr, size_t size, bool finalizable) {
  if (size <= ZMarkPartialArrayMinSize) {
    follow_small_array(addr, size, finalizable);
  } else {
    follow_large_array(addr, size, finalizable);
  }
}

void ZMark::follow_partial_array(ZMarkStackEntry entry, bool finalizable) {
  const uintptr_t addr = ZAddress::good(entry.partial_array_offset() << ZMarkPartialArrayMinSizeShift);
  const size_t size = entry.partial_array_length() * oopSize;

  follow_array(addr, size, finalizable);
}

void ZMark::follow_array_object(objArrayOop obj, bool finalizable) {
  if (finalizable) {
    ZMarkBarrierOopClosure<true /* finalizable */> cl;
    cl.do_klass(obj->klass());
  } else {
    ZMarkBarrierOopClosure<false /* finalizable */> cl;
    cl.do_klass(obj->klass());
  }

  const uintptr_t addr = (uintptr_t)obj->base();
  const size_t size = (size_t)obj->length() * oopSize;

  follow_array(addr, size, finalizable);
}

void ZMark::follow_object(oop obj, bool finalizable) {
  if (finalizable) {
    ZMarkBarrierOopClosure<true /* finalizable */> cl;
    obj->oop_iterate(&cl);
  } else {
    ZMarkBarrierOopClosure<false /* finalizable */> cl;
    obj->oop_iterate(&cl);
  }
}

bool ZMark::try_mark_object(ZMarkCache* cache, uintptr_t addr, bool finalizable) {
  ZPage* const page = _page_table->get(addr);
  if (page->is_allocating()) {
    // Newly allocated objects are implicitly marked
    return false;
  }

  // Try mark object
  bool inc_live = false;
  const bool success = page->mark_object(addr, finalizable, inc_live);
  if (inc_live) {
    // Update live objects/bytes for page. We use the aligned object
    // size since that is the actual number of bytes used on the page
    // and alignment paddings can never be reclaimed.
    const size_t size = ZUtils::object_size(addr);
    const size_t aligned_size = align_up(size, page->object_alignment());
    cache->inc_live(page, aligned_size);
  }

  return success;
}

void ZMark::mark_and_follow(ZMarkCache* cache, ZMarkStackEntry entry) {
  // Decode flags
  const bool finalizable = entry.finalizable();
  const bool partial_array = entry.partial_array();

  if (partial_array) {
    follow_partial_array(entry, finalizable);
    return;
  }

  // Decode object address and follow flag
  const uintptr_t addr = entry.object_address();

  if (!try_mark_object(cache, addr, finalizable)) {
    // Already marked
    return;
  }

  if (is_array(addr)) {
    // Decode follow flag
    const bool follow = entry.follow();

    // The follow flag is currently only relevant for object arrays
    if (follow) {
      follow_array_object(objArrayOop(ZOop::from_address(addr)), finalizable);
    }
  } else {
    follow_object(ZOop::from_address(addr), finalizable);
  }
}

template <typename T>
bool ZMark::drain(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
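  // T is either ZMarkNoTimeout or ZMarkTimeout; the timeout is
  // checked after each processed entry.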
  ZMarkStackEntry entry;

  // Drain stripe stacks
  while (stacks->pop(&_allocator, &_stripes, stripe, entry)) {
    mark_and_follow(cache, entry);

    // Check timeout
    if (timeout->has_expired()) {
      // Timeout
      return false;
    }
  }

  // Success
  return true;
}

template <typename T>
bool ZMark::drain_and_flush(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
  const bool success = drain(stripe, stacks, cache, timeout);

  // Flush and publish worker stacks
  stacks->flush(&_allocator, &_stripes);

  return success;
}

bool ZMark::try_steal(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  // Try to steal a stack from another stripe
  for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
       victim_stripe != stripe;
       victim_stripe = _stripes.stripe_next(victim_stripe)) {
    ZMarkStack* const stack = victim_stripe->steal_stack();
    if (stack != NULL) {
      // Success, install the stolen stack
      stacks->install(&_stripes, stripe, stack);
      return true;
    }
  }

  // Nothing to steal
  return false;
}

void ZMark::idle() const {
  ZStatTimer timer(ZSubPhaseConcurrentMarkIdle);
  os::naked_short_sleep(1);
}

class ZMarkFlushAndFreeStacksClosure : public HandshakeClosure {
private:
  ZMark* const _mark;
  bool         _flushed;

public:
  ZMarkFlushAndFreeStacksClosure(ZMark* mark) :
      HandshakeClosure("ZMarkFlushAndFreeStacks"),
      _mark(mark),
      _flushed(false) {}

  void do_thread(Thread* thread) {
    if (_mark->flush_and_free(thread)) {
      _flushed = true;
    }
  }

  bool flushed() const {
    return _flushed;
  }
};

bool ZMark::flush(bool at_safepoint) {
  ZMarkFlushAndFreeStacksClosure cl(this);
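  // At a safepoint all threads can be visited directly, otherwise a
  // handshake is needed to safely flush each thread's stacks.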
  if (at_safepoint) {
    Threads::threads_do(&cl);
  } else {
    Handshake::execute(&cl);
  }

  // Returns true if more work is available
  return cl.flushed() || !_stripes.is_empty();
}

bool ZMark::try_flush(volatile size_t* nflush) {
  Atomic::inc(nflush);

  ZStatTimer timer(ZSubPhaseConcurrentMarkTryFlush);
  return flush(false /* at_safepoint */);
}

bool ZMark::try_proactive_flush() {
  // Only do proactive flushes from worker 0
  if (ZThread::worker_id() != 0) {
    return false;
  }

  if (Atomic::load(&_work_nproactiveflush) == ZMarkProactiveFlushMax ||
      Atomic::load(&_work_nterminateflush) != 0) {
    // Limit reached or we're trying to terminate
    return false;
  }

  return try_flush(&_work_nproactiveflush);
}

bool ZMark::try_terminate() {
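  // Termination runs in two stages. The last thread to enter stage 0
  // first tries to flush out more work from the thread-local stacks of
  // other threads, and marking only terminates once all workers have
  // entered stage 1 without new work appearing.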
  ZStatTimer timer(ZSubPhaseConcurrentMarkTryTerminate);

  if (_terminate.enter_stage0()) {
    // Last thread entered stage 0, flush
    if (Atomic::load(&_work_terminateflush) &&
        Atomic::load(&_work_nterminateflush) != ZMarkTerminateFlushMax) {
      // Exit stage 0 to allow other threads to continue marking
      _terminate.exit_stage0();

      // Flush before termination
      if (!try_flush(&_work_nterminateflush)) {
        // No more work available, skip further flush attempts
        Atomic::store(&_work_terminateflush, false);
      }

      // Don't terminate, regardless of whether we successfully
      // flushed out more work or not. We've already exited
      // termination stage 0, to allow other threads to continue
      // marking, so this thread has to return false and also
      // make another round of attempted marking.
      return false;
    }
  }

  for (;;) {
    if (_terminate.enter_stage1()) {
      // Last thread entered stage 1, terminate
      return true;
    }

    // Idle to give the other threads
    // a chance to enter termination.
    idle();

    if (!_terminate.try_exit_stage1()) {
      // All workers in stage 1, terminate
      return true;
    }

    if (_terminate.try_exit_stage0()) {
      // More work available, don't terminate
      return false;
    }
  }
}

class ZMarkNoTimeout : public StackObj {
public:
  bool has_expired() {
    return false;
  }
};

void ZMark::work_without_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  ZStatTimer timer(ZSubPhaseConcurrentMark);
  ZMarkNoTimeout no_timeout;

  for (;;) {
    drain_and_flush(stripe, stacks, cache, &no_timeout);

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    if (try_proactive_flush()) {
      // Work available
      continue;
    }

    if (try_terminate()) {
      // Terminate
      break;
    }
  }
}

class ZMarkTimeout : public StackObj {
private:
  const Ticks    _start;
  const uint64_t _timeout;
  const uint64_t _check_interval;
  uint64_t       _check_at;
  uint64_t       _check_count;
  bool           _expired;

public:
  ZMarkTimeout(uint64_t timeout_in_millis) :
      _start(Ticks::now()),
      _timeout(_start.value() + TimeHelper::millis_to_counter(timeout_in_millis)),
      _check_interval(200),
      _check_at(_check_interval),
      _check_count(0),
      _expired(false) {}

  ~ZMarkTimeout() {
    const Tickspan duration = Ticks::now() - _start;
    log_debug(gc, marking)("Mark With Timeout (%s): %s, " UINT64_FORMAT " oops, %.3fms",
                           ZThread::name(), _expired ? "Expired" : "Completed",
                           _check_count, TimeHelper::counter_to_millis(duration.value()));
  }

  bool has_expired() {
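    // Reading the clock on every call would be expensive, so only
    // sample Ticks::now() every _check_interval calls.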
    if (++_check_count == _check_at) {
      _check_at += _check_interval;
      if ((uint64_t)Ticks::now().value() >= _timeout) {
        // Timeout
        _expired = true;
      }
    }

    return _expired;
  }
};

void ZMark::work_with_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, uint64_t timeout_in_millis) {
  ZStatTimer timer(ZSubPhaseMarkTryComplete);
  ZMarkTimeout timeout(timeout_in_millis);

  for (;;) {
    if (!drain_and_flush(stripe, stacks, cache, &timeout)) {
      // Timed out
      break;
    }

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    // Terminate
    break;
  }
}

void ZMark::work(uint64_t timeout_in_millis) {
  ZMarkCache cache(_stripes.nstripes());
  ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, ZThread::worker_id());
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());

  if (timeout_in_millis == 0) {
    work_without_timeout(&cache, stripe, stacks);
  } else {
    work_with_timeout(&cache, stripe, stacks, timeout_in_millis);
  }

  // Make sure stacks have been flushed
  assert(stacks->is_empty(&_stripes), "Should be empty");

  // Free remaining stacks
  stacks->free(&_allocator);
}

class ZMarkConcurrentRootsIteratorClosure : public ZRootsIteratorClosure {
public:
  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_oop_field(p, false /* finalizable */);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZMarkConcurrentRootsTask : public ZTask {
private:
  SuspendibleThreadSetJoiner          _sts_joiner;
  ZConcurrentRootsIteratorClaimStrong _roots;
  ZMarkConcurrentRootsIteratorClosure _cl;

public:
  ZMarkConcurrentRootsTask(ZMark* mark) :
      ZTask("ZMarkConcurrentRootsTask"),
      _sts_joiner(),
      _roots(),
      _cl() {
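    // Holding the ClassLoaderDataGraph lock keeps the set of class
    // loaders stable while their roots are scanned. The suspendible
    // thread set is joined above so that safepoints can still make
    // progress while this task holds the lock.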
    ClassLoaderDataGraph_lock->lock();
  }

  ~ZMarkConcurrentRootsTask() {
    ClassLoaderDataGraph_lock->unlock();
  }

  virtual void work() {
    _roots.oops_do(&_cl);
  }
};

class ZMarkTask : public ZTask {
private:
  ZMark* const   _mark;
  const uint64_t _timeout_in_millis;

public:
  ZMarkTask(ZMark* mark, uint64_t timeout_in_millis = 0) :
      ZTask("ZMarkTask"),
      _mark(mark),
      _timeout_in_millis(timeout_in_millis) {
    _mark->prepare_work();
  }

  ~ZMarkTask() {
    _mark->finish_work();
  }

  virtual void work() {
    _mark->work(_timeout_in_millis);
  }
};

void ZMark::mark(bool initial) {
  if (initial) {
    ZMarkConcurrentRootsTask task(this);
    _workers->run_concurrent(&task);
  }

  ZMarkTask task(this);
  _workers->run_concurrent(&task);
}

bool ZMark::try_complete() {
  _ntrycomplete++;

  // Use nconcurrent number of worker threads to maintain the
  // worker/stripe distribution used during concurrent mark.
  ZMarkTask task(this, ZMarkCompleteTimeout);
  _workers->run_concurrent(&task);

  // Successful if all stripes are empty
  return _stripes.is_empty();
}

bool ZMark::try_end() {
  // Flush all mark stacks
  if (!flush(true /* at_safepoint */)) {
    // Mark completed
    return true;
  }

  // Try complete marking by doing a limited
  // amount of mark work in this phase.
  return try_complete();
}

bool ZMark::end() {
  // Try end marking
  if (!try_end()) {
    // Mark not completed
    _ncontinue++;
    return false;
  }

  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Update statistics
  ZStatMark::set_at_mark_end(_nproactiveflush, _nterminateflush, _ntrycomplete, _ncontinue);

  // Mark completed
  return true;
}

void ZMark::flush_and_free() {
  Thread* const thread = Thread::current();
  flush_and_free(thread);
}

bool ZMark::flush_and_free(Thread* thread) {
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
  const bool flushed = stacks->flush(&_allocator, &_stripes);
  stacks->free(&_allocator);
  return flushed;
}

class ZVerifyMarkStacksEmptyClosure : public ThreadClosure {
private:
  const ZMarkStripeSet* const _stripes;

public:
  ZVerifyMarkStacksEmptyClosure(const ZMarkStripeSet* stripes) :
      _stripes(stripes) {}

  void do_thread(Thread* thread) {
    ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
    guarantee(stacks->is_empty(_stripes), "Should be empty");
  }
};

void ZMark::verify_all_stacks_empty() const {
  // Verify thread stacks
  ZVerifyMarkStacksEmptyClosure cl(&_stripes);
  Threads::threads_do(&cl);

  // Verify stripe stacks
  guarantee(_stripes.is_empty(), "Should be empty");
}