/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zMarkCache.inline.hpp"
#include "gc/z/zMarkStack.inline.hpp"
#include "gc/z/zMarkTerminate.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zThreadLocalAllocBuffer.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handshake.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ticks.hpp"

static const ZStatSubPhase ZSubPhaseConcurrentMark("Concurrent Mark");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryFlush("Concurrent Mark Try Flush");
static const ZStatSubPhase ZSubPhaseConcurrentMarkIdle("Concurrent Mark Idle");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryTerminate("Concurrent Mark Try Terminate");
static const ZStatSubPhase ZSubPhaseMarkTryComplete("Pause Mark Try Complete");

ZMark::ZMark(ZWorkers* workers, ZPageTable* pagetable) :
    _workers(workers),
    _pagetable(pagetable),
    _allocator(),
    _stripes(),
    _terminate(),
    _work_terminateflush(true),
    _work_nproactiveflush(0),
    _work_nterminateflush(0),
    _nproactiveflush(0),
    _nterminateflush(0),
    _ntrycomplete(0),
    _ncontinue(0),
    _nworkers(0) {}

bool ZMark::is_initialized() const {
  return _allocator.is_initialized();
}

size_t ZMark::calculate_nstripes(uint nworkers) const {
  // Calculate the number of stripes from the number of workers we use,
  // where the number of stripes must be a power of two and we want to
  // have at least one worker per stripe.
  const size_t nstripes = ZUtils::round_down_power_of_2(nworkers);
  return MIN2(nstripes, ZMarkStripesMax);
}

void ZMark::prepare_mark() {
  // Increment global sequence number to invalidate
  // marking information for all pages.
  ZGlobalSeqNum++;

  // Reset flush/continue counters
  _nproactiveflush = 0;
  _nterminateflush = 0;
  _ntrycomplete = 0;
  _ncontinue = 0;

  // Set number of workers to use
  _nworkers = _workers->nconcurrent();

  // Set number of mark stripes to use, based on number
  // of workers we will use in the concurrent mark phase.
  const size_t nstripes = calculate_nstripes(_nworkers);
  _stripes.set_nstripes(nstripes);

  // Update statistics
  ZStatMark::set_at_mark_start(nstripes);

  // Print worker/stripe distribution
  LogTarget(Debug, gc, marking) log;
  if (log.is_enabled()) {
    log.print("Mark Worker/Stripe Distribution");
    for (uint worker_id = 0; worker_id < _nworkers; worker_id++) {
      const ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, worker_id);
      const size_t stripe_id = _stripes.stripe_id(stripe);
      log.print("  Worker %u(%u) -> Stripe " SIZE_FORMAT "(" SIZE_FORMAT ")",
                worker_id, _nworkers, stripe_id, nstripes);
    }
  }
}

class ZMarkRootsIteratorClosure : public ZRootsIteratorClosure {
public:
  ZMarkRootsIteratorClosure() {
    ZThreadLocalAllocBuffer::reset_statistics();
  }

  ~ZMarkRootsIteratorClosure() {
    ZThreadLocalAllocBuffer::publish_statistics();
  }

  virtual void do_thread(Thread* thread) {
    ZRootsIteratorClosure::do_thread(thread);

    // Update thread local address bad mask
    ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask);

    // Retire TLAB
    ZThreadLocalAllocBuffer::retire(thread);
  }

  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_root_oop_field(p);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZMarkRootsTask : public ZTask {
private:
  ZMark* const              _mark;
  ZRootsIterator            _roots;
  ZMarkRootsIteratorClosure _cl;

public:
  ZMarkRootsTask(ZMark* mark) :
      ZTask("ZMarkRootsTask"),
      _mark(mark),
      _roots() {}

  virtual void work() {
    _roots.oops_do(&_cl);

    // Flush and free worker stacks. Needed here since
    // the set of workers executing during root scanning
    // can be different from the set of workers executing
    // during mark.
    _mark->flush_and_free();
  }
};

void ZMark::start() {
  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Prepare for concurrent mark
  prepare_mark();

  // Mark roots
  ZMarkRootsTask task(this);
  _workers->run_parallel(&task);
}

void ZMark::prepare_work() {
  assert(_nworkers == _workers->nconcurrent(), "Invalid number of workers");

  // Set number of active workers
  _terminate.reset(_nworkers);

  // Reset flush counters
  _work_nproactiveflush = _work_nterminateflush = 0;
  _work_terminateflush = true;
}

void ZMark::finish_work() {
  // Accumulate proactive/terminate flush counters
  _nproactiveflush += _work_nproactiveflush;
  _nterminateflush += _work_nterminateflush;
}

bool ZMark::is_array(uintptr_t addr) const {
  return ZOop::to_oop(addr)->is_objArray();
}

void ZMark::push_partial_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(is_aligned(addr, ZMarkPartialArrayMinSize), "Address misaligned");
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());
  ZMarkStripe* const stripe = _stripes.stripe_for_addr(addr);
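  // Encode the partial array as an (offset, length) pair, where the
  // offset is expressed in units of ZMarkPartialArrayMinSize so that
  // it fits in the bits available in a mark stack entry.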
  const uintptr_t offset = ZAddress::offset(addr) >> ZMarkPartialArrayMinSizeShift;
  const uintptr_t length = size / oopSize;
  const ZMarkStackEntry entry(offset, length, finalizable);

  log_develop_trace(gc, marking)("Array push partial: " PTR_FORMAT " (" SIZE_FORMAT "), stripe: " SIZE_FORMAT,
                                 addr, size, _stripes.stripe_id(stripe));

  stacks->push(&_allocator, &_stripes, stripe, entry, false /* publish */);
}

void ZMark::follow_small_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= ZMarkPartialArrayMinSize, "Too large, should be split");
  const size_t length = size / oopSize;

  log_develop_trace(gc, marking)("Array follow small: " PTR_FORMAT " (" SIZE_FORMAT ")", addr, size);

  ZBarrier::mark_barrier_on_oop_array((oop*)addr, length, finalizable);
}

void ZMark::follow_large_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= (size_t)arrayOopDesc::max_array_length(T_OBJECT) * oopSize, "Too large");
  assert(size > ZMarkPartialArrayMinSize, "Too small, should not be split");
  const uintptr_t start = addr;
  const uintptr_t end = start + size;

  // Calculate the aligned middle start/end/size, where the middle start
  // should always be greater than the start (hence the +1 below) to make
  // sure we always do some follow work, not just split the array into pieces.
  const uintptr_t middle_start = align_up(start + 1, ZMarkPartialArrayMinSize);
  const size_t    middle_size = align_down(end - middle_start, ZMarkPartialArrayMinSize);
  const uintptr_t middle_end = middle_start + middle_size;

  log_develop_trace(gc, marking)("Array follow large: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT "), "
                                 "middle: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT ")",
                                 start, end, size, middle_start, middle_end, middle_size);

  // Push unaligned trailing part
  if (end > middle_end) {
    const uintptr_t trailing_addr = middle_end;
    const size_t trailing_size = end - middle_end;
    push_partial_array(trailing_addr, trailing_size, finalizable);
  }

  // Push aligned middle part(s)
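  // The remaining middle range is repeatedly split in half, pushing the
  // right half and continuing with the left, so that the work is spread
  // across stripes and can be drained by multiple workers in parallel.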
  uintptr_t partial_addr = middle_end;
  while (partial_addr > middle_start) {
    const size_t parts = 2;
    const size_t partial_size = align_up((partial_addr - middle_start) / parts, ZMarkPartialArrayMinSize);
    partial_addr -= partial_size;
    push_partial_array(partial_addr, partial_size, finalizable);
  }

  // Follow leading part
  assert(start < middle_start, "Miscalculated middle start");
  const uintptr_t leading_addr = start;
  const size_t leading_size = middle_start - start;
  follow_small_array(leading_addr, leading_size, finalizable);
}

void ZMark::follow_array(uintptr_t addr, size_t size, bool finalizable) {
  if (size <= ZMarkPartialArrayMinSize) {
    follow_small_array(addr, size, finalizable);
  } else {
    follow_large_array(addr, size, finalizable);
  }
}

void ZMark::follow_partial_array(ZMarkStackEntry entry, bool finalizable) {
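  // Decode the (offset, length) pair pushed by push_partial_array()
  // back into an address and a size in bytes.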
  const uintptr_t addr = ZAddress::good(entry.partial_array_offset() << ZMarkPartialArrayMinSizeShift);
  const size_t size = entry.partial_array_length() * oopSize;

  follow_array(addr, size, finalizable);
}

void ZMark::follow_array_object(objArrayOop obj, bool finalizable) {
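  // Follow the array's klass, then follow the array elements, which
  // may be split into partial array entries by follow_array().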
  if (finalizable) {
    ZMarkBarrierOopClosure<true /* finalizable */> cl;
    cl.do_klass(obj->klass());
  } else {
    ZMarkBarrierOopClosure<false /* finalizable */> cl;
    cl.do_klass(obj->klass());
  }

  const uintptr_t addr = (uintptr_t)obj->base();
  const size_t size = (size_t)obj->length() * oopSize;

  follow_array(addr, size, finalizable);
}

void ZMark::follow_object(oop obj, bool finalizable) {
  if (finalizable) {
    ZMarkBarrierOopClosure<true /* finalizable */> cl;
    obj->oop_iterate(&cl);
  } else {
    ZMarkBarrierOopClosure<false /* finalizable */> cl;
    obj->oop_iterate(&cl);
  }
}

bool ZMark::try_mark_object(ZMarkCache* cache, uintptr_t addr, bool finalizable) {
  ZPage* const page = _pagetable->get(addr);
  if (page->is_allocating()) {
    // Newly allocated objects are implicitly marked
    return false;
  }

  // Try mark object
  bool inc_live = false;
  const bool success = page->mark_object(addr, finalizable, inc_live);
  if (inc_live) {
    // Update live objects/bytes for page. We use the aligned object
    // size since that is the actual number of bytes used on the page
    // and alignment paddings can never be reclaimed.
    const size_t size = ZUtils::object_size(addr);
    const size_t aligned_size = align_up(size, page->object_alignment());
    cache->inc_live(page, aligned_size);
  }

  return success;
}

void ZMark::mark_and_follow(ZMarkCache* cache, ZMarkStackEntry entry) {
  // Decode flags
  const bool finalizable = entry.finalizable();
  const bool partial_array = entry.partial_array();

  if (partial_array) {
    follow_partial_array(entry, finalizable);
    return;
  }

  // Decode object address
  const uintptr_t addr = entry.object_address();

  if (!try_mark_object(cache, addr, finalizable)) {
    // Already marked
    return;
  }

  if (is_array(addr)) {
    follow_array_object(objArrayOop(ZOop::to_oop(addr)), finalizable);
  } else {
    follow_object(ZOop::to_oop(addr), finalizable);
  }
}

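// The timeout policy is a template parameter so that the common no-timeout
// case (ZMarkNoTimeout, whose has_expired() is a constant false) compiles
// down to a drain loop without any clock checks.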
template <typename T>
bool ZMark::drain(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
  ZMarkStackEntry entry;

  // Drain stripe stacks
  while (stacks->pop(&_allocator, &_stripes, stripe, entry)) {
    mark_and_follow(cache, entry);

    // Check timeout
    if (timeout->has_expired()) {
      // Timeout
      return false;
    }
  }

  // Success
  return true;
}

template <typename T>
bool ZMark::drain_and_flush(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
  const bool success = drain(stripe, stacks, cache, timeout);

  // Flush and publish worker stacks
  stacks->flush(&_allocator, &_stripes);

  return success;
}

bool ZMark::try_steal(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  // Try to steal a stack from another stripe
  for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
       victim_stripe != stripe;
       victim_stripe = _stripes.stripe_next(victim_stripe)) {
    ZMarkStack* const stack = victim_stripe->steal_stack();
    if (stack != NULL) {
      // Success, install the stolen stack
      stacks->install(&_stripes, stripe, stack);
      return true;
    }
  }

  // Nothing to steal
  return false;
}

void ZMark::idle() const {
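  // Sleep for ~1ms to give other workers a chance to make
  // progress or enter termination.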
  ZStatTimer timer(ZSubPhaseConcurrentMarkIdle);
  os::naked_short_sleep(1);
}

class ZMarkFlushAndFreeStacksClosure : public ThreadClosure {
private:
  ZMark* const _mark;
  bool         _flushed;

public:
  ZMarkFlushAndFreeStacksClosure(ZMark* mark) :
      _mark(mark),
      _flushed(false) {}

  void do_thread(Thread* thread) {
    if (_mark->flush_and_free(thread)) {
      _flushed = true;
    }
  }

  bool flushed() const {
    return _flushed;
  }
};

bool ZMark::flush(bool at_safepoint) {
  ZMarkFlushAndFreeStacksClosure cl(this);
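  // Inside a safepoint all Java threads are stopped, so their stacks
  // can be flushed directly. Outside a safepoint, a handshake is used
  // to flush the stacks of each thread safely.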
  if (at_safepoint) {
    Threads::threads_do(&cl);
  } else {
    Handshake::execute(&cl);
  }

  // Returns true if more work is available
  return cl.flushed() || !_stripes.is_empty();
}

bool ZMark::try_flush(volatile size_t* nflush) {
  // Only flush if handshakes are enabled
  if (!ThreadLocalHandshakes) {
    return false;
  }

  Atomic::inc(nflush);

  ZStatTimer timer(ZSubPhaseConcurrentMarkTryFlush);
  return flush(false /* at_safepoint */);
}

bool ZMark::try_proactive_flush() {
  // Only do proactive flushes from worker 0
  if (ZThread::worker_id() != 0) {
    return false;
  }

  if (Atomic::load(&_work_nproactiveflush) == ZMarkProactiveFlushMax ||
      Atomic::load(&_work_nterminateflush) != 0) {
    // Limit reached or we're trying to terminate
    return false;
  }

  return try_flush(&_work_nproactiveflush);
}

bool ZMark::try_terminate() {
  ZStatTimer timer(ZSubPhaseConcurrentMarkTryTerminate);

  if (_terminate.enter_stage0()) {
    // Last thread entered stage 0, flush
    if (Atomic::load(&_work_terminateflush) &&
        Atomic::load(&_work_nterminateflush) != ZMarkTerminateFlushMax) {
      // Exit stage 0 to allow other threads to continue marking
      _terminate.exit_stage0();

      // Flush before termination
      if (!try_flush(&_work_nterminateflush)) {
        // No more work available, skip further flush attempts
        Atomic::store(false, &_work_terminateflush);
      }

      // Don't terminate, regardless of whether we successfully
      // flushed out more work or not. We've already exited
      // termination stage 0, to allow other threads to continue
      // marking, so this thread has to return false and also
      // make another round of attempted marking.
      return false;
    }
  }

  for (;;) {
    if (_terminate.enter_stage1()) {
      // Last thread entered stage 1, terminate
      return true;
    }

    // Idle to give the other threads
    // a chance to enter termination.
    idle();

    if (!_terminate.try_exit_stage1()) {
      // All workers in stage 1, terminate
      return true;
    }

    if (_terminate.try_exit_stage0()) {
      // More work available, don't terminate
      return false;
    }
  }
}

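// Timeout policy that never expires, used when marking without a
// time budget (the normal concurrent mark phase).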
class ZMarkNoTimeout : public StackObj {
public:
  bool has_expired() {
    return false;
  }
};

void ZMark::work_without_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  ZStatTimer timer(ZSubPhaseConcurrentMark);
  ZMarkNoTimeout no_timeout;

  for (;;) {
    drain_and_flush(stripe, stacks, cache, &no_timeout);

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    if (try_proactive_flush()) {
      // Work available
      continue;
    }

    if (try_terminate()) {
      // Terminate
      break;
    }
  }
}

class ZMarkTimeout : public StackObj {
private:
  const Ticks    _start;
  const uint64_t _timeout;
  const uint64_t _check_interval;
  uint64_t       _check_at;
  uint64_t       _check_count;
  bool           _expired;

public:
  ZMarkTimeout(uint64_t timeout_in_millis) :
      _start(Ticks::now()),
      _timeout(_start.value() + TimeHelper::millis_to_counter(timeout_in_millis)),
      _check_interval(200),
      _check_at(_check_interval),
      _check_count(0),
      _expired(false) {}

  ~ZMarkTimeout() {
    const Tickspan duration = Ticks::now() - _start;
    log_debug(gc, marking)("Mark With Timeout (%s): %s, " UINT64_FORMAT " oops, %.3fms",
                           ZThread::name(), _expired ? "Expired" : "Completed",
                           _check_count, TimeHelper::counter_to_millis(duration.value()));
  }

  bool has_expired() {
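    // Reading the clock is relatively expensive, so only
    // check the time once every _check_interval calls.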
    if (++_check_count == _check_at) {
      _check_at += _check_interval;
      if ((uint64_t)Ticks::now().value() >= _timeout) {
        // Timeout
        _expired = true;
      }
    }

    return _expired;
  }
};

void ZMark::work_with_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, uint64_t timeout_in_millis) {
  ZStatTimer timer(ZSubPhaseMarkTryComplete);
  ZMarkTimeout timeout(timeout_in_millis);

  for (;;) {
    if (!drain_and_flush(stripe, stacks, cache, &timeout)) {
      // Timed out
      break;
    }

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    // Terminate
    break;
  }
}

void ZMark::work(uint64_t timeout_in_millis) {
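  // The mark cache batches live bytes/objects updates to pages,
  // reducing the number of atomic updates to the page live maps.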
  ZMarkCache cache(_stripes.nstripes());
  ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, ZThread::worker_id());
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());

  if (timeout_in_millis == 0) {
    work_without_timeout(&cache, stripe, stacks);
  } else {
    work_with_timeout(&cache, stripe, stacks, timeout_in_millis);
  }

  // Make sure stacks have been flushed
  assert(stacks->is_empty(&_stripes), "Should be empty");

  // Free remaining stacks
  stacks->free(&_allocator);
}

class ZMarkConcurrentRootsIteratorClosure : public ZRootsIteratorClosure {
public:
  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_oop_field(p, false /* finalizable */);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZMarkConcurrentRootsTask : public ZTask {
private:
  ZConcurrentRootsIterator            _roots;
  ZMarkConcurrentRootsIteratorClosure _cl;

public:
  ZMarkConcurrentRootsTask(ZMark* mark) :
      ZTask("ZMarkConcurrentRootsTask"),
      _roots(true /* marking */),
      _cl() {}

  virtual void work() {
    _roots.oops_do(&_cl);
  }
};

class ZMarkTask : public ZTask {
private:
  ZMark* const   _mark;
  const uint64_t _timeout_in_millis;

public:
  ZMarkTask(ZMark* mark, uint64_t timeout_in_millis = 0) :
      ZTask("ZMarkTask"),
      _mark(mark),
      _timeout_in_millis(timeout_in_millis) {
    _mark->prepare_work();
  }

  ~ZMarkTask() {
    _mark->finish_work();
  }

  virtual void work() {
    _mark->work(_timeout_in_millis);
  }
};

void ZMark::mark(bool initial) {
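  // Concurrent roots are only scanned during the initial mark
  // iteration. When marking continues because try_end() failed,
  // only the mark stacks need to be drained.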
  if (initial) {
    ZMarkConcurrentRootsTask task(this);
    _workers->run_concurrent(&task);
  }

  ZMarkTask task(this);
  _workers->run_concurrent(&task);
}

bool ZMark::try_complete() {
  _ntrycomplete++;

  // Use nconcurrent number of worker threads to maintain the
  // worker/stripe distribution used during concurrent mark.
  ZMarkTask task(this, ZMarkCompleteTimeout);
  _workers->run_concurrent(&task);

  // Successful if all stripes are empty
  return _stripes.is_empty();
}

bool ZMark::try_end() {
  // Flush all mark stacks
  if (!flush(true /* at_safepoint */)) {
    // Mark completed
    return true;
  }

  // Try complete marking by doing a limited
  // amount of mark work in this phase.
  return try_complete();
}

bool ZMark::end() {
  // Try end marking
  if (!try_end()) {
    // Mark not completed
    _ncontinue++;
    return false;
  }

  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Update statistics
  ZStatMark::set_at_mark_end(_nproactiveflush, _nterminateflush, _ntrycomplete, _ncontinue);

  // Mark completed
  return true;
}

void ZMark::flush_and_free() {
  Thread* const thread = Thread::current();
  flush_and_free(thread);
}

bool ZMark::flush_and_free(Thread* thread) {
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
  const bool flushed = stacks->flush(&_allocator, &_stripes);
  stacks->free(&_allocator);
  return flushed;
}

class ZVerifyMarkStacksEmptyClosure : public ThreadClosure {
private:
  const ZMarkStripeSet* const _stripes;

public:
  ZVerifyMarkStacksEmptyClosure(const ZMarkStripeSet* stripes) :
      _stripes(stripes) {}

  void do_thread(Thread* thread) {
    ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
    guarantee(stacks->is_empty(_stripes), "Should be empty");
  }
};

void ZMark::verify_all_stacks_empty() const {
  // Verify thread stacks
  ZVerifyMarkStacksEmptyClosure cl(&_stripes);
  Threads::threads_do(&cl);

  // Verify stripe stacks
  guarantee(_stripes.is_empty(), "Should be empty");
}