/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

  24 #include "precompiled.hpp"
  25 #include "gc/z/zBarrier.inline.hpp"
  26 #include "gc/z/zMark.inline.hpp"
  27 #include "gc/z/zMarkCache.inline.hpp"
  28 #include "gc/z/zMarkStack.inline.hpp"
  29 #include "gc/z/zMarkTerminate.inline.hpp"
  30 #include "gc/z/zOopClosures.inline.hpp"
  31 #include "gc/z/zPage.hpp"
  32 #include "gc/z/zPageTable.inline.hpp"
  33 #include "gc/z/zRootsIterator.hpp"
  34 #include "gc/z/zStat.hpp"
  35 #include "gc/z/zTask.hpp"
  36 #include "gc/z/zThread.hpp"
  37 #include "gc/z/zUtils.inline.hpp"
  38 #include "gc/z/zWorkers.inline.hpp"
  39 #include "logging/log.hpp"
  40 #include "oops/objArrayOop.inline.hpp"
  41 #include "oops/oop.inline.hpp"
  42 #include "runtime/atomic.hpp"
  43 #include "runtime/handshake.hpp"
  44 #include "runtime/orderAccess.hpp"
  45 #include "runtime/prefetch.inline.hpp"
  46 #include "runtime/thread.hpp"
  47 #include "utilities/align.hpp"
  48 #include "utilities/globalDefinitions.hpp"
  49 #include "utilities/ticks.hpp"

static const ZStatSubPhase ZSubPhaseConcurrentMark("Concurrent Mark");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryFlush("Concurrent Mark Try Flush");
static const ZStatSubPhase ZSubPhaseConcurrentMarkIdle("Concurrent Mark Idle");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryTerminate("Concurrent Mark Try Terminate");
static const ZStatSubPhase ZSubPhaseMarkTryComplete("Pause Mark Try Complete");

ZMark::ZMark(ZWorkers* workers, ZPageTable* pagetable) :
    _workers(workers),
    _pagetable(pagetable),
    _allocator(),
    _stripes(),
    _terminate(),
    _work_terminateflush(true),
    _work_nproactiveflush(0),
    _work_nterminateflush(0),
    _nproactiveflush(0),
    _nterminateflush(0),
    _ntrycomplete(0),
    _ncontinue(0),
    _nworkers(0) {}

size_t ZMark::calculate_nstripes(uint nworkers) const {
  // Calculate the number of stripes from the number of workers we use,
  // where the number of stripes must be a power of two and we want to
  // have at least one worker per stripe.
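  // For example, 6 workers give 4 stripes (the largest power of two
  // not exceeding 6); the result is then capped at ZMarkStripesMax.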
  const size_t nstripes = ZUtils::round_down_power_of_2(nworkers);
  return MIN2(nstripes, ZMarkStripesMax);
}

void ZMark::prepare_mark() {
  // Increment global sequence number to invalidate
  // marking information for all pages.
  ZGlobalSeqNum++;

  // Reset flush/continue counters
  _nproactiveflush = 0;
  _nterminateflush = 0;
  _ntrycomplete = 0;
  _ncontinue = 0;

  // Set number of workers to use
  _nworkers = _workers->nconcurrent();

  // Set number of mark stripes to use, based on number
  // of workers we will use in the concurrent mark phase.
  const size_t nstripes = calculate_nstripes(_nworkers);
  _stripes.set_nstripes(nstripes);

  // Update statistics
  ZStatMark::set_at_mark_start(nstripes);

  // Print worker/stripe distribution
  LogTarget(Debug, gc, marking) log;
  if (log.is_enabled()) {
    log.print("Mark Worker/Stripe Distribution");
    for (uint worker_id = 0; worker_id < _nworkers; worker_id++) {
      const ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, worker_id);
      const size_t stripe_id = _stripes.stripe_id(stripe);
      log.print("  Worker %u(%u) -> Stripe " SIZE_FORMAT "(" SIZE_FORMAT ")",
                worker_id, _nworkers, stripe_id, nstripes);
    }
  }
}

class ZMarkRootsTask : public ZTask {
private:
  ZMark* const   _mark;
  ZRootsIterator _roots;

public:
  ZMarkRootsTask(ZMark* mark) :
      ZTask("ZMarkRootsTask"),
      _mark(mark),
      _roots() {}

  virtual void work() {
    ZMarkRootOopClosure cl;
    _roots.oops_do(&cl);

    // Flush and free worker stacks. Needed here since
    // the set of workers executing during root scanning
    // can be different from the set of workers executing
    // during mark.
    _mark->flush_and_free();
  }
};

void ZMark::start() {
  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Prepare for concurrent mark
  prepare_mark();

  // Mark roots
  ZMarkRootsTask task(this);
  _workers->run_parallel(&task);
}

void ZMark::prepare_work() {
  assert(_nworkers == _workers->nconcurrent(), "Invalid number of workers");

  // Set number of active workers
  _terminate.reset(_nworkers);

  // Reset flush counters
  _work_nproactiveflush = _work_nterminateflush = 0;
  _work_terminateflush = true;
}

void ZMark::finish_work() {
  // Accumulate proactive/terminate flush counters
  _nproactiveflush += _work_nproactiveflush;
  _nterminateflush += _work_nterminateflush;
}

bool ZMark::is_array(uintptr_t addr) const {
  return ZOop::to_oop(addr)->is_objArray();
}

void ZMark::push_partial_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(is_aligned(addr, ZMarkPartialArrayMinSize), "Address misaligned");
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());
  ZMarkStripe* const stripe = _stripes.stripe_for_addr(addr);
  const uintptr_t offset = ZAddress::offset(addr) >> ZMarkPartialArrayMinSizeShift;
  const uintptr_t length = size / oopSize;
  const ZMarkStackEntry entry(offset, length, finalizable);

  log_develop_trace(gc, marking)("Array push partial: " PTR_FORMAT " (" SIZE_FORMAT "), stripe: " SIZE_FORMAT,
                                 addr, size, _stripes.stripe_id(stripe));

  stacks->push(&_allocator, &_stripes, stripe, entry, false /* publish */);
}

void ZMark::follow_small_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= ZMarkPartialArrayMinSize, "Too large, should be split");
  const size_t length = size / oopSize;

  log_develop_trace(gc, marking)("Array follow small: " PTR_FORMAT " (" SIZE_FORMAT ")", addr, size);

  ZBarrier::mark_barrier_on_oop_array((oop*)addr, length, finalizable);
}

void ZMark::follow_large_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= (size_t)arrayOopDesc::max_array_length(T_OBJECT) * oopSize, "Too large");
  assert(size > ZMarkPartialArrayMinSize, "Too small, should not be split");
  const uintptr_t start = addr;
  const uintptr_t end = start + size;

  // Calculate the aligned middle start/end/size, where the middle start
  // should always be greater than the start (hence the +1 below) to make
  // sure we always do some follow work, not just split the array into pieces.
  const uintptr_t middle_start = align_up(start + 1, ZMarkPartialArrayMinSize);
  const size_t    middle_size = align_down(end - middle_start, ZMarkPartialArrayMinSize);
  const uintptr_t middle_end = middle_start + middle_size;
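
  // For example, assuming 4K chunks (ZMarkPartialArrayMinSize), an array
  // spanning [0x1100, 0x4100) gives middle_start = 0x2000 and
  // middle_end = 0x4000. The trailing part [0x4000, 0x4100) and the
  // middle parts [0x3000, 0x4000) and [0x2000, 0x3000) are pushed as
  // partial arrays, while the leading part [0x1100, 0x2000) is
  // followed immediately below.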

  log_develop_trace(gc, marking)("Array follow large: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT "), "
                                 "middle: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT ")",
                                 start, end, size, middle_start, middle_end, middle_size);

  // Push unaligned trailing part
  if (end > middle_end) {
    const uintptr_t trailing_addr = middle_end;
    const size_t trailing_size = end - middle_end;
    push_partial_array(trailing_addr, trailing_size, finalizable);
  }

  // Push aligned middle part(s)
  uintptr_t partial_addr = middle_end;
  while (partial_addr > middle_start) {
    const size_t parts = 2;
    const size_t partial_size = align_up((partial_addr - middle_start) / parts, ZMarkPartialArrayMinSize);
    partial_addr -= partial_size;
    push_partial_array(partial_addr, partial_size, finalizable);
  }

  // Follow leading part
  assert(start < middle_start, "Miscalculated middle start");
  const uintptr_t leading_addr = start;
  const size_t leading_size = middle_start - start;
  follow_small_array(leading_addr, leading_size, finalizable);
}

void ZMark::follow_array(uintptr_t addr, size_t size, bool finalizable) {
  if (size <= ZMarkPartialArrayMinSize) {
    follow_small_array(addr, size, finalizable);
  } else {
    follow_large_array(addr, size, finalizable);
  }
}

void ZMark::follow_partial_array(ZMarkStackEntry entry, bool finalizable) {
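  // Decode the address and size encoded by push_partial_array(). The
  // offset is in units of ZMarkPartialArrayMinSize and the length in oops.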
  const uintptr_t addr = ZAddress::good(entry.partial_array_offset() << ZMarkPartialArrayMinSizeShift);
  const size_t size = entry.partial_array_length() * oopSize;

  follow_array(addr, size, finalizable);
}

void ZMark::follow_array_object(objArrayOop obj, bool finalizable) {
  const uintptr_t addr = (uintptr_t)obj->base();
  const size_t size = (size_t)obj->length() * oopSize;

  follow_array(addr, size, finalizable);
}

void ZMark::follow_object(oop obj, bool finalizable) {
  if (finalizable) {
    ZMarkBarrierOopClosure<true /* finalizable */> cl;
    obj->oop_iterate(&cl);
  } else {
    ZMarkBarrierOopClosure<false /* finalizable */> cl;
    obj->oop_iterate(&cl);
  }
}

bool ZMark::try_mark_object(ZMarkCache* cache, uintptr_t addr, bool finalizable) {
  ZPage* const page = _pagetable->get(addr);
  if (page->is_allocating()) {
    // Newly allocated objects are implicitly marked
    return false;
  }

  // Try mark object
  bool inc_live = false;
  const bool success = page->mark_object(addr, finalizable, inc_live);
  if (inc_live) {
    // Update live objects/bytes for page. We use the aligned object
    // size since that is the actual number of bytes used on the page
    // and alignment paddings can never be reclaimed.
    const size_t size = ZUtils::object_size(addr);
    const size_t aligned_size = align_up(size, page->object_alignment());
    cache->inc_live(page, aligned_size);
  }

  return success;
}

void ZMark::mark_and_follow(ZMarkCache* cache, ZMarkStackEntry entry) {
  // Decode flags
  const bool finalizable = entry.finalizable();
  const bool partial_array = entry.partial_array();

  if (partial_array) {
    follow_partial_array(entry, finalizable);
    return;
  }

  // Decode object address
  const uintptr_t addr = entry.object_address();

  if (!try_mark_object(cache, addr, finalizable)) {
    // Already marked
    return;
  }

  if (is_array(addr)) {
    follow_array_object(objArrayOop(ZOop::to_oop(addr)), finalizable);
  } else {
    follow_object(ZOop::to_oop(addr), finalizable);
  }
}

template <typename T>
bool ZMark::drain(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
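  // The timeout policy is a template parameter so that the no-timeout
  // case (ZMarkNoTimeout) can be inlined and optimized away.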
  ZMarkStackEntry entry;

  // Drain stripe stacks
  while (stacks->pop(&_allocator, &_stripes, stripe, entry)) {
    mark_and_follow(cache, entry);

    // Check timeout
    if (timeout->has_expired()) {
      // Timeout
      return false;
    }
  }

  // Success
  return true;
}

template <typename T>
bool ZMark::drain_and_flush(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
  const bool success = drain(stripe, stacks, cache, timeout);

  // Flush and publish worker stacks
  stacks->flush(&_allocator, &_stripes);

  return success;
}

bool ZMark::try_steal(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  // Try to steal a stack from another stripe
  for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
       victim_stripe != stripe;
       victim_stripe = _stripes.stripe_next(victim_stripe)) {
    ZMarkStack* const stack = victim_stripe->steal_stack();
    if (stack != NULL) {
      // Success, install the stolen stack
      stacks->install(&_stripes, stripe, stack);
      return true;
    }
  }

  // Nothing to steal
  return false;
}

void ZMark::idle() const {
  ZStatTimer timer(ZSubPhaseConcurrentMarkIdle);
  os::naked_short_sleep(1);
}

class ZMarkFlushAndFreeStacksClosure : public ThreadClosure {
private:
  ZMark* const _mark;
  bool         _flushed;

public:
  ZMarkFlushAndFreeStacksClosure(ZMark* mark) :
      _mark(mark),
      _flushed(false) {}

  void do_thread(Thread* thread) {
    if (_mark->flush_and_free(thread)) {
      _flushed = true;
    }
  }

  bool flushed() const {
    return _flushed;
  }
};

bool ZMark::flush(bool at_safepoint) {
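  // Flush and free the stacks of all threads. At a safepoint the threads
  // can be iterated directly; otherwise a handshake operation is used to
  // have each thread flush its own stacks.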
  ZMarkFlushAndFreeStacksClosure cl(this);
  if (at_safepoint) {
    Threads::threads_do(&cl);
  } else {
    Handshake::execute(&cl);
  }

  // Returns true if more work is available
  return cl.flushed() || !_stripes.is_empty();
}

bool ZMark::try_flush(volatile size_t* nflush) {
  // Only flush if handshakes are enabled
  if (!ThreadLocalHandshakes) {
    return false;
  }

  Atomic::inc(nflush);

  ZStatTimer timer(ZSubPhaseConcurrentMarkTryFlush);
  return flush(false /* at_safepoint */);
}

bool ZMark::try_proactive_flush() {
  // Only do proactive flushes from worker 0
  if (ZThread::worker_id() != 0) {
    return false;
  }

  if (Atomic::load(&_work_nproactiveflush) == ZMarkProactiveFlushMax ||
      Atomic::load(&_work_nterminateflush) != 0) {
    // Limit reached or we're trying to terminate
    return false;
  }

  return try_flush(&_work_nproactiveflush);
}

bool ZMark::try_terminate() {
  ZStatTimer timer(ZSubPhaseConcurrentMarkTryTerminate);

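  // Termination is a two-stage protocol. A worker enters stage 0 when it
  // has run out of both local and stealable work. The last worker to enter
  // stage 0 is responsible for flushing out more work from the Java
  // threads before termination is allowed. Workers then gather in stage 1,
  // idling and re-checking, and terminate only once all of them have
  // entered stage 1 and no more work has been flushed out.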
  if (_terminate.enter_stage0()) {
    // Last thread entered stage 0, flush
    if (Atomic::load(&_work_terminateflush) &&
        Atomic::load(&_work_nterminateflush) != ZMarkTerminateFlushMax) {
      // Exit stage 0 to allow other threads to continue marking
      _terminate.exit_stage0();

      // Flush before termination
      if (!try_flush(&_work_nterminateflush)) {
        // No more work available, skip further flush attempts
        Atomic::store(false, &_work_terminateflush);
      }

      // Don't terminate, regardless of whether we successfully
      // flushed out more work or not. We've already exited
      // termination stage 0, to allow other threads to continue
      // marking, so this thread has to return false and also
      // make another round of attempted marking.
      return false;
    }
  }

  for (;;) {
    if (_terminate.enter_stage1()) {
      // Last thread entered stage 1, terminate
      return true;
    }

    // Idle to give the other threads
    // a chance to enter termination.
    idle();

    if (!_terminate.try_exit_stage1()) {
      // All workers in stage 1, terminate
      return true;
    }

    if (_terminate.try_exit_stage0()) {
      // More work available, don't terminate
      return false;
    }
  }
}

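// Timeout policy that never expires, used by work_without_timeout()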
class ZMarkNoTimeout : public StackObj {
public:
  bool has_expired() {
    return false;
  }
};

void ZMark::work_without_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  ZStatTimer timer(ZSubPhaseConcurrentMark);
  ZMarkNoTimeout no_timeout;

  for (;;) {
    drain_and_flush(stripe, stacks, cache, &no_timeout);

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    if (try_proactive_flush()) {
      // Work available
      continue;
    }

    if (try_terminate()) {
      // Terminate
      break;
    }
  }
}

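// Timeout policy that expires after a given number of milliseconds, used
// by work_with_timeout() when trying to complete marking within a pause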
class ZMarkTimeout : public StackObj {
private:
  const Ticks    _start;
  const uint64_t _timeout;
  const uint64_t _check_interval;
  uint64_t       _check_at;
  uint64_t       _check_count;
  bool           _expired;

public:
  ZMarkTimeout(uint64_t timeout_in_millis) :
      _start(Ticks::now()),
      _timeout(_start.value() + TimeHelper::millis_to_counter(timeout_in_millis)),
      _check_interval(200),
      _check_at(_check_interval),
      _check_count(0),
      _expired(false) {}

  ~ZMarkTimeout() {
    const Tickspan duration = Ticks::now() - _start;
    log_debug(gc, marking)("Mark With Timeout (%s): %s, " UINT64_FORMAT " oops, %.3fms",
                           ZThread::name(), _expired ? "Expired" : "Completed",
                           _check_count, TimeHelper::counter_to_millis(duration.value()));
  }

  bool has_expired() {
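    // Sampling the clock on every call would be expensive relative to the
    // marking work done per entry, so only check it every _check_interval calls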
    if (++_check_count == _check_at) {
      _check_at += _check_interval;
      if ((uint64_t)Ticks::now().value() >= _timeout) {
        // Timeout
        _expired = true;
      }
    }

    return _expired;
  }
};

void ZMark::work_with_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, uint64_t timeout_in_millis) {
  ZStatTimer timer(ZSubPhaseMarkTryComplete);
  ZMarkTimeout timeout(timeout_in_millis);

  for (;;) {
    if (!drain_and_flush(stripe, stacks, cache, &timeout)) {
      // Timed out
      break;
    }

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    // Terminate
    break;
  }
}

void ZMark::work(uint64_t timeout_in_millis) {
  ZMarkCache cache(_stripes.nstripes());
  ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, ZThread::worker_id());
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());

  if (timeout_in_millis == 0) {
    work_without_timeout(&cache, stripe, stacks);
  } else {
    work_with_timeout(&cache, stripe, stacks, timeout_in_millis);
  }

  // Make sure stacks have been flushed
  assert(stacks->is_empty(&_stripes), "Should be empty");

  // Free remaining stacks
  stacks->free(&_allocator);
}

class ZMarkTask : public ZTask {
private:
  ZMark* const   _mark;
  const uint64_t _timeout_in_millis;

public:
  ZMarkTask(ZMark* mark, uint64_t timeout_in_millis = 0) :
      ZTask("ZMarkTask"),
      _mark(mark),
      _timeout_in_millis(timeout_in_millis) {
    _mark->prepare_work();
  }

  ~ZMarkTask() {
    _mark->finish_work();
  }

  virtual void work() {
    _mark->work(_timeout_in_millis);
  }
};

void ZMark::mark() {
  ZMarkTask task(this);
  _workers->run_concurrent(&task);
}

bool ZMark::try_complete() {
  _ntrycomplete++;

  // Use nconcurrent number of worker threads to maintain the
  // worker/stripe distribution used during concurrent mark.
  ZMarkTask task(this, ZMarkCompleteTimeout);
  _workers->run_concurrent(&task);

  // Successful if all stripes are empty
  return _stripes.is_empty();
}

bool ZMark::try_end() {
  // Flush all mark stacks
  if (!flush(true /* at_safepoint */)) {
    // Mark completed
    return true;
  }

  // Try complete marking by doing a limited
  // amount of mark work in this phase.
  return try_complete();
}

bool ZMark::end() {
  // Try end marking
  if (!try_end()) {
    // Mark not completed
    _ncontinue++;
    return false;
  }

  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Update statistics
  ZStatMark::set_at_mark_end(_nproactiveflush, _nterminateflush, _ntrycomplete, _ncontinue);

  // Mark completed
  return true;
}

void ZMark::flush_and_free() {
  Thread* const thread = Thread::current();
  flush_and_free(thread);
}

bool ZMark::flush_and_free(Thread* thread) {
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
  const bool flushed = stacks->flush(&_allocator, &_stripes);
  stacks->free(&_allocator);
  return flushed;
}

class ZVerifyMarkStacksEmptyClosure : public ThreadClosure {
private:
  const ZMarkStripeSet* const _stripes;

public:
  ZVerifyMarkStacksEmptyClosure(const ZMarkStripeSet* stripes) :
      _stripes(stripes) {}

  void do_thread(Thread* thread) {
    ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
    guarantee(stacks->is_empty(_stripes), "Should be empty");
  }
};

void ZMark::verify_all_stacks_empty() const {
  // Verify thread stacks
  ZVerifyMarkStacksEmptyClosure cl(&_stripes);
  Threads::threads_do(&cl);

  // Verify stripe stacks
 683   guarantee(_stripes.is_empty(), "Should be emtpy");
 684 }