/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zMarkCache.inline.hpp"
#include "gc/z/zMarkStack.inline.hpp"
#include "gc/z/zMarkTerminate.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zStatTLAB.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handshake.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ticks.hpp"

static const ZStatSubPhase ZSubPhaseConcurrentMark("Concurrent Mark");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryFlush("Concurrent Mark Try Flush");
static const ZStatSubPhase ZSubPhaseConcurrentMarkIdle("Concurrent Mark Idle");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryTerminate("Concurrent Mark Try Terminate");
static const ZStatSubPhase ZSubPhaseMarkTryComplete("Pause Mark Try Complete");

ZMark::ZMark(ZWorkers* workers, ZPageTable* pagetable) :
    _workers(workers),
    _pagetable(pagetable),
    _allocator(),
    _stripes(),
    _terminate(),
    _work_terminateflush(true),
    _work_nproactiveflush(0),
    _work_nterminateflush(0),
    _nproactiveflush(0),
    _nterminateflush(0),
    _ntrycomplete(0),
    _ncontinue(0),
    _nworkers(0) {}

bool ZMark::is_initialized() const {
  return _allocator.is_initialized();
}

size_t ZMark::calculate_nstripes(uint nworkers) const {
  // Calculate the number of stripes from the number of workers we use,
  // where the number of stripes must be a power of two and we want to
  // have at least one worker per stripe.
  const size_t nstripes = ZUtils::round_down_power_of_2(nworkers);
  return MIN2(nstripes, ZMarkStripesMax);
}

void ZMark::prepare_mark() {
  // Increment global sequence number to invalidate
  // marking information for all pages.
  ZGlobalSeqNum++;

  // Reset flush/continue counters
  _nproactiveflush = 0;
  _nterminateflush = 0;
  _ntrycomplete = 0;
  _ncontinue = 0;

  // Set number of workers to use
  _nworkers = _workers->nconcurrent();

  // Set number of mark stripes to use, based on number
  // of workers we will use in the concurrent mark phase.
  const size_t nstripes = calculate_nstripes(_nworkers);
  _stripes.set_nstripes(nstripes);

  // Update statistics
  ZStatMark::set_at_mark_start(nstripes);

  // Print worker/stripe distribution
  LogTarget(Debug, gc, marking) log;
  if (log.is_enabled()) {
    log.print("Mark Worker/Stripe Distribution");
    for (uint worker_id = 0; worker_id < _nworkers; worker_id++) {
      const ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, worker_id);
      const size_t stripe_id = _stripes.stripe_id(stripe);
      log.print("  Worker %u(%u) -> Stripe " SIZE_FORMAT "(" SIZE_FORMAT ")",
                worker_id, _nworkers, stripe_id, nstripes);
    }
  }
}

class ZMarkRootsIteratorClosure : public ZRootsIteratorClosure {
private:
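  // Retag a TLAB boundary address (start/top/end) so it carries the
  // currently good address metadata bits, or remains NULL. Needed because
  // the global address epoch was just changed by prepare_mark().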
  static void fixup_address(HeapWord** p) {
    *p = (HeapWord*)ZAddress::good_or_null((uintptr_t)*p);
  }

public:
  ZMarkRootsIteratorClosure() {
    ZStatTLAB::reset();
  }

  ~ZMarkRootsIteratorClosure() {
    ZStatTLAB::publish();
  }

  virtual void do_thread(Thread* thread) {
    ZRootsIteratorClosure::do_thread(thread);

    // Update thread local address bad mask
    ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask);

    // Retire TLAB
    if (UseTLAB && thread->is_Java_thread()) {
      thread->tlab().addresses_do(fixup_address);
      thread->tlab().retire(ZStatTLAB::get());
      thread->tlab().resize();
    }
  }

  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_root_oop_field(p);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZMarkRootsTask : public ZTask {
private:
  ZMark* const              _mark;
  ZRootsIterator            _roots;
  ZMarkRootsIteratorClosure _cl;

public:
  ZMarkRootsTask(ZMark* mark) :
      ZTask("ZMarkRootsTask"),
      _mark(mark),
      _roots() {}

  virtual void work() {
    _roots.oops_do(&_cl);

    // Flush and free worker stacks. Needed here since
    // the set of workers executing during root scanning
    // can be different from the set of workers executing
    // during mark.
    _mark->flush_and_free();
  }
};

void ZMark::start() {
  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Prepare for concurrent mark
  prepare_mark();

  // Mark roots
  ZMarkRootsTask task(this);
  _workers->run_parallel(&task);
}

void ZMark::prepare_work() {
  assert(_nworkers == _workers->nconcurrent(), "Invalid number of workers");

  // Set number of active workers
  _terminate.reset(_nworkers);

  // Reset flush counters
  _work_nproactiveflush = _work_nterminateflush = 0;
  _work_terminateflush = true;
}

void ZMark::finish_work() {
  // Accumulate proactive/terminate flush counters
  _nproactiveflush += _work_nproactiveflush;
  _nterminateflush += _work_nterminateflush;
}

bool ZMark::is_array(uintptr_t addr) const {
  return ZOop::to_oop(addr)->is_objArray();
}

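// Push a segment of an object array onto this worker's mark stacks as a
// single (offset, length) entry, rather than pushing each element oop, so
// that large arrays can be processed in stealable chunks.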
void ZMark::push_partial_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(is_aligned(addr, ZMarkPartialArrayMinSize), "Address misaligned");
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());
  ZMarkStripe* const stripe = _stripes.stripe_for_addr(addr);
  const uintptr_t offset = ZAddress::offset(addr) >> ZMarkPartialArrayMinSizeShift;
  const uintptr_t length = size / oopSize;
  const ZMarkStackEntry entry(offset, length, finalizable);

  log_develop_trace(gc, marking)("Array push partial: " PTR_FORMAT " (" SIZE_FORMAT "), stripe: " SIZE_FORMAT,
                                 addr, size, _stripes.stripe_id(stripe));

  stacks->push(&_allocator, &_stripes, stripe, entry, false /* publish */);
}

void ZMark::follow_small_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= ZMarkPartialArrayMinSize, "Too large, should be split");
  const size_t length = size / oopSize;

  log_develop_trace(gc, marking)("Array follow small: " PTR_FORMAT " (" SIZE_FORMAT ")", addr, size);

  ZBarrier::mark_barrier_on_oop_array((oop*)addr, length, finalizable);
}

void ZMark::follow_large_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= (size_t)arrayOopDesc::max_array_length(T_OBJECT) * oopSize, "Too large");
  assert(size > ZMarkPartialArrayMinSize, "Too small, should not be split");
  const uintptr_t start = addr;
  const uintptr_t end = start + size;

  // Calculate the aligned middle start/end/size, where the middle start
  // should always be greater than the start (hence the +1 below) to make
  // sure we always do some follow work, not just split the array into pieces.
  const uintptr_t middle_start = align_up(start + 1, ZMarkPartialArrayMinSize);
  const size_t    middle_size = align_down(end - middle_start, ZMarkPartialArrayMinSize);
  const uintptr_t middle_end = middle_start + middle_size;

  log_develop_trace(gc, marking)("Array follow large: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT "), "
                                 "middle: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT ")",
                                 start, end, size, middle_start, middle_end, middle_size);

  // Push unaligned trailing part
  if (end > middle_end) {
    const uintptr_t trailing_addr = middle_end;
    const size_t trailing_size = end - middle_end;
    push_partial_array(trailing_addr, trailing_size, finalizable);
  }

  // Push aligned middle part(s)
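  // Repeatedly halve the remaining middle region, pushing the upper
  // (aligned) half each time, so the work is published as progressively
  // smaller chunks that other workers can steal.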
  uintptr_t partial_addr = middle_end;
  while (partial_addr > middle_start) {
    const size_t parts = 2;
    const size_t partial_size = align_up((partial_addr - middle_start) / parts, ZMarkPartialArrayMinSize);
    partial_addr -= partial_size;
    push_partial_array(partial_addr, partial_size, finalizable);
  }

  // Follow leading part
  assert(start < middle_start, "Miscalculated middle start");
  const uintptr_t leading_addr = start;
  const size_t leading_size = middle_start - start;
  follow_small_array(leading_addr, leading_size, finalizable);
}

void ZMark::follow_array(uintptr_t addr, size_t size, bool finalizable) {
  if (size <= ZMarkPartialArrayMinSize) {
    follow_small_array(addr, size, finalizable);
  } else {
    follow_large_array(addr, size, finalizable);
  }
}

void ZMark::follow_partial_array(ZMarkStackEntry entry, bool finalizable) {
  const uintptr_t addr = ZAddress::good(entry.partial_array_offset() << ZMarkPartialArrayMinSizeShift);
  const size_t size = entry.partial_array_length() * oopSize;

  follow_array(addr, size, finalizable);
}

void ZMark::follow_array_object(objArrayOop obj, bool finalizable) {
  if (finalizable) {
    ZMarkBarrierOopClosure<true /* finalizable */> cl;
    cl.do_klass(obj->klass());
  } else {
    ZMarkBarrierOopClosure<false /* finalizable */> cl;
    cl.do_klass(obj->klass());
  }

  const uintptr_t addr = (uintptr_t)obj->base();
  const size_t size = (size_t)obj->length() * oopSize;

  follow_array(addr, size, finalizable);
}

void ZMark::follow_object(oop obj, bool finalizable) {
  if (finalizable) {
    ZMarkBarrierOopClosure<true /* finalizable */> cl;
    obj->oop_iterate(&cl);
  } else {
    ZMarkBarrierOopClosure<false /* finalizable */> cl;
    obj->oop_iterate(&cl);
  }
}

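// Attempt to set the mark bit for the object at addr. Returns true if this
// thread marked the object, false if it was already marked or resides on a
// page that is still being allocated (where objects are implicitly live).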
bool ZMark::try_mark_object(ZMarkCache* cache, uintptr_t addr, bool finalizable) {
  ZPage* const page = _pagetable->get(addr);
  if (page->is_allocating()) {
    // Newly allocated objects are implicitly marked
    return false;
  }

  // Try mark object
  bool inc_live = false;
  const bool success = page->mark_object(addr, finalizable, inc_live);
  if (inc_live) {
    // Update live objects/bytes for page. We use the aligned object
    // size since that is the actual number of bytes used on the page
    // and alignment paddings can never be reclaimed.
    const size_t size = ZUtils::object_size(addr);
    const size_t aligned_size = align_up(size, page->object_alignment());
    cache->inc_live(page, aligned_size);
  }

  return success;
}

void ZMark::mark_and_follow(ZMarkCache* cache, ZMarkStackEntry entry) {
  // Decode flags
  const bool finalizable = entry.finalizable();
  const bool partial_array = entry.partial_array();

  if (partial_array) {
    follow_partial_array(entry, finalizable);
    return;
  }

  // Decode object address
  const uintptr_t addr = entry.object_address();

  if (!try_mark_object(cache, addr, finalizable)) {
    // Already marked
    return;
  }

  if (is_array(addr)) {
    follow_array_object(objArrayOop(ZOop::to_oop(addr)), finalizable);
  } else {
    follow_object(ZOop::to_oop(addr), finalizable);
  }
}

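// Pop and process entries from this worker's local stacks and the given
// stripe until everything is drained. Returns false if the timeout policy
// expired before the stripe was empty, true otherwise.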
template <typename T>
bool ZMark::drain(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
  ZMarkStackEntry entry;

  // Drain stripe stacks
  while (stacks->pop(&_allocator, &_stripes, stripe, entry)) {
    mark_and_follow(cache, entry);

    // Check timeout
    if (timeout->has_expired()) {
      // Timeout
      return false;
    }
  }

  // Success
  return true;
}

template <typename T>
bool ZMark::drain_and_flush(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
  const bool success = drain(stripe, stacks, cache, timeout);

  // Flush and publish worker stacks
  stacks->flush(&_allocator, &_stripes);

  return success;
}

bool ZMark::try_steal(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  // Try to steal a stack from another stripe
  for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
       victim_stripe != stripe;
       victim_stripe = _stripes.stripe_next(victim_stripe)) {
    ZMarkStack* const stack = victim_stripe->steal_stack();
    if (stack != NULL) {
      // Success, install the stolen stack
      stacks->install(&_stripes, stripe, stack);
      return true;
    }
  }

  // Nothing to steal
  return false;
}

void ZMark::idle() const {
  ZStatTimer timer(ZSubPhaseConcurrentMarkIdle);
  os::naked_short_sleep(1);
}

class ZMarkFlushAndFreeStacksClosure : public ThreadClosure {
private:
  ZMark* const _mark;
  bool         _flushed;

public:
  ZMarkFlushAndFreeStacksClosure(ZMark* mark) :
      _mark(mark),
      _flushed(false) {}

  void do_thread(Thread* thread) {
    if (_mark->flush_and_free(thread)) {
      _flushed = true;
    }
  }

  bool flushed() const {
    return _flushed;
  }
};

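// Flush the thread-local mark stacks of all threads, either directly when
// already inside a safepoint, or via a thread-local handshake with each
// thread while running concurrently.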
bool ZMark::flush(bool at_safepoint) {
  ZMarkFlushAndFreeStacksClosure cl(this);
  if (at_safepoint) {
    Threads::threads_do(&cl);
  } else {
    Handshake::execute(&cl);
  }

  // Returns true if more work is available
  return cl.flushed() || !_stripes.is_empty();
}

bool ZMark::try_flush(volatile size_t* nflush) {
  // Only flush if handshakes are enabled
  if (!ThreadLocalHandshakes) {
    return false;
  }

  Atomic::inc(nflush);

  ZStatTimer timer(ZSubPhaseConcurrentMarkTryFlush);
  return flush(false /* at_safepoint */);
}

bool ZMark::try_proactive_flush() {
  // Only do proactive flushes from worker 0
  if (ZThread::worker_id() != 0) {
    return false;
  }

  if (Atomic::load(&_work_nproactiveflush) == ZMarkProactiveFlushMax ||
      Atomic::load(&_work_nterminateflush) != 0) {
    // Limit reached or we're trying to terminate
    return false;
  }

  return try_flush(&_work_nproactiveflush);
}

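// Two-stage termination protocol. The last worker to enter stage 0 first
// tries to flush out more work via a handshake; only when flushing has been
// exhausted do all workers rendezvous in stage 1 and terminate.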
bool ZMark::try_terminate() {
  ZStatTimer timer(ZSubPhaseConcurrentMarkTryTerminate);

  if (_terminate.enter_stage0()) {
    // Last thread entered stage 0, flush
    if (Atomic::load(&_work_terminateflush) &&
        Atomic::load(&_work_nterminateflush) != ZMarkTerminateFlushMax) {
      // Exit stage 0 to allow other threads to continue marking
      _terminate.exit_stage0();

      // Flush before termination
      if (!try_flush(&_work_nterminateflush)) {
        // No more work available, skip further flush attempts
        Atomic::store(false, &_work_terminateflush);
      }

      // Don't terminate, regardless of whether we successfully
      // flushed out more work or not. We've already exited
      // termination stage 0 to allow other threads to continue
      // marking, so this thread has to return false and do
      // another round of marking.
      return false;
    }
  }

  for (;;) {
    if (_terminate.enter_stage1()) {
      // Last thread entered stage 1, terminate
      return true;
    }

    // Idle to give the other threads
    // a chance to enter termination.
    idle();

    if (!_terminate.try_exit_stage1()) {
      // All workers in stage 1, terminate
      return true;
    }

    if (_terminate.try_exit_stage0()) {
      // More work available, don't terminate
      return false;
    }
  }
}

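// Timeout policy that never expires, used by the normal (unbounded)
// concurrent mark pass.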
class ZMarkNoTimeout : public StackObj {
public:
  bool has_expired() {
    return false;
  }
};

void ZMark::work_without_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  ZStatTimer timer(ZSubPhaseConcurrentMark);
  ZMarkNoTimeout no_timeout;

  for (;;) {
    drain_and_flush(stripe, stacks, cache, &no_timeout);

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    if (try_proactive_flush()) {
      // Work available
      continue;
    }

    if (try_terminate()) {
      // Terminate
      break;
    }
  }
}

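// Timeout policy that expires roughly timeout_in_millis after construction.
// The clock is only sampled once every _check_interval calls to
// has_expired(), to keep the per-entry overhead low.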
class ZMarkTimeout : public StackObj {
private:
  const Ticks    _start;
  const uint64_t _timeout;
  const uint64_t _check_interval;
  uint64_t       _check_at;
  uint64_t       _check_count;
  bool           _expired;

public:
  ZMarkTimeout(uint64_t timeout_in_millis) :
      _start(Ticks::now()),
      _timeout(_start.value() + TimeHelper::millis_to_counter(timeout_in_millis)),
      _check_interval(200),
      _check_at(_check_interval),
      _check_count(0),
      _expired(false) {}

  ~ZMarkTimeout() {
    const Tickspan duration = Ticks::now() - _start;
    log_debug(gc, marking)("Mark With Timeout (%s): %s, " UINT64_FORMAT " oops, %.3fms",
                           ZThread::name(), _expired ? "Expired" : "Completed",
                           _check_count, TimeHelper::counter_to_millis(duration.value()));
  }

  bool has_expired() {
    if (++_check_count == _check_at) {
      _check_at += _check_interval;
      if ((uint64_t)Ticks::now().value() >= _timeout) {
        // Timeout
        _expired = true;
      }
    }

    return _expired;
  }
};

void ZMark::work_with_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, uint64_t timeout_in_millis) {
  ZStatTimer timer(ZSubPhaseMarkTryComplete);
  ZMarkTimeout timeout(timeout_in_millis);

  for (;;) {
    if (!drain_and_flush(stripe, stacks, cache, &timeout)) {
      // Timed out
      break;
    }

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    // Terminate
    break;
  }
}

void ZMark::work(uint64_t timeout_in_millis) {
  ZMarkCache cache(_stripes.nstripes());
  ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, ZThread::worker_id());
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());

  if (timeout_in_millis == 0) {
    work_without_timeout(&cache, stripe, stacks);
  } else {
    work_with_timeout(&cache, stripe, stacks, timeout_in_millis);
  }

  // Make sure stacks have been flushed
  assert(stacks->is_empty(&_stripes), "Should be empty");

  // Free remaining stacks
  stacks->free(&_allocator);
}

class ZMarkConcurrentRootsIteratorClosure : public ZRootsIteratorClosure {
public:
  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_oop_field(p, false /* finalizable */);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZMarkConcurrentRootsTask : public ZTask {
private:
  ZConcurrentRootsIterator            _roots;
  ZMarkConcurrentRootsIteratorClosure _cl;

public:
  ZMarkConcurrentRootsTask(ZMark* mark) :
      ZTask("ZMarkConcurrentRootsTask"),
      _roots(true /* marking */),
      _cl() {}

  virtual void work() {
    _roots.oops_do(&_cl);
  }
};

class ZMarkTask : public ZTask {
private:
  ZMark* const   _mark;
  const uint64_t _timeout_in_millis;

public:
  ZMarkTask(ZMark* mark, uint64_t timeout_in_millis = 0) :
      ZTask("ZMarkTask"),
      _mark(mark),
      _timeout_in_millis(timeout_in_millis) {
    _mark->prepare_work();
  }

  ~ZMarkTask() {
    _mark->finish_work();
  }

  virtual void work() {
    _mark->work(_timeout_in_millis);
  }
};

void ZMark::mark(bool initial) {
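  // Concurrent roots are only scanned during the initial mark pass;
  // a continued mark only needs to drain the stripes again.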
  if (initial) {
    ZMarkConcurrentRootsTask task(this);
    _workers->run_concurrent(&task);
  }

  ZMarkTask task(this);
  _workers->run_concurrent(&task);
}

bool ZMark::try_complete() {
  _ntrycomplete++;

  // Use nconcurrent number of worker threads to maintain the
  // worker/stripe distribution used during concurrent mark.
  ZMarkTask task(this, ZMarkCompleteTimeout);
  _workers->run_concurrent(&task);

  // Successful if all stripes are empty
  return _stripes.is_empty();
}

bool ZMark::try_end() {
  // Flush all mark stacks
  if (!flush(true /* at_safepoint */)) {
    // Mark completed
    return true;
  }

  // Try complete marking by doing a limited
  // amount of mark work in this phase.
  return try_complete();
}

bool ZMark::end() {
  // Try end marking
  if (!try_end()) {
    // Mark not completed
    _ncontinue++;
    return false;
  }

  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Update statistics
  ZStatMark::set_at_mark_end(_nproactiveflush, _nterminateflush, _ntrycomplete, _ncontinue);

  // Mark completed
  return true;
}

void ZMark::flush_and_free() {
  Thread* const thread = Thread::current();
  flush_and_free(thread);
}

bool ZMark::flush_and_free(Thread* thread) {
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
  const bool flushed = stacks->flush(&_allocator, &_stripes);
  stacks->free(&_allocator);
  return flushed;
}

class ZVerifyMarkStacksEmptyClosure : public ThreadClosure {
private:
  const ZMarkStripeSet* const _stripes;

public:
  ZVerifyMarkStacksEmptyClosure(const ZMarkStripeSet* stripes) :
      _stripes(stripes) {}

  void do_thread(Thread* thread) {
    ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
    guarantee(stacks->is_empty(_stripes), "Should be empty");
  }
};

void ZMark::verify_all_stacks_empty() const {
  // Verify thread stacks
  ZVerifyMarkStacksEmptyClosure cl(&_stripes);
  Threads::threads_do(&cl);

  // Verify stripe stacks
  guarantee(_stripes.is_empty(), "Should be empty");
}