/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zMarkCache.inline.hpp"
#include "gc/z/zMarkStack.inline.hpp"
#include "gc/z/zMarkTerminate.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zStatTLAB.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handshake.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ticks.hpp"

static const ZStatSubPhase ZSubPhaseConcurrentMark("Concurrent Mark");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryFlush("Concurrent Mark Try Flush");
static const ZStatSubPhase ZSubPhaseConcurrentMarkIdle("Concurrent Mark Idle");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryTerminate("Concurrent Mark Try Terminate");
static const ZStatSubPhase ZSubPhaseMarkTryComplete("Pause Mark Try Complete");

ZMark::ZMark(ZWorkers* workers, ZPageTable* pagetable) :
    _workers(workers),
    _pagetable(pagetable),
    _allocator(),
    _stripes(),
    _terminate(),
    _work_terminateflush(true),
    _work_nproactiveflush(0),
    _work_nterminateflush(0),
    _nproactiveflush(0),
    _nterminateflush(0),
    _ntrycomplete(0),
    _ncontinue(0),
    _nworkers(0) {}

bool ZMark::is_initialized() const {
  return _allocator.is_initialized();
}

size_t ZMark::calculate_nstripes(uint nworkers) const {
  // Calculate the number of stripes from the number of workers we use,
  // where the number of stripes must be a power of two and we want to
  // have at least one worker per stripe.
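  // For example (illustrative numbers): 6 workers round down to 4 stripes,
  // while very large worker counts are capped at ZMarkStripesMax.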
  const size_t nstripes = ZUtils::round_down_power_of_2(nworkers);
  return MIN2(nstripes, ZMarkStripesMax);
}

void ZMark::prepare_mark() {
  // Increment global sequence number to invalidate
  // marking information for all pages.
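  // Pages compare their own sequence number against the global one;
  // a stale number means the page's marking information is ignored.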
  ZGlobalSeqNum++;

  // Reset flush/continue counters
  _nproactiveflush = 0;
  _nterminateflush = 0;
  _ntrycomplete = 0;
  _ncontinue = 0;

  // Set number of workers to use
  _nworkers = _workers->nconcurrent();

  // Set number of mark stripes to use, based on number
  // of workers we will use in the concurrent mark phase.
  const size_t nstripes = calculate_nstripes(_nworkers);
  _stripes.set_nstripes(nstripes);

  // Update statistics
  ZStatMark::set_at_mark_start(nstripes);

  // Print worker/stripe distribution
  LogTarget(Debug, gc, marking) log;
  if (log.is_enabled()) {
    log.print("Mark Worker/Stripe Distribution");
    for (uint worker_id = 0; worker_id < _nworkers; worker_id++) {
      const ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, worker_id);
      const size_t stripe_id = _stripes.stripe_id(stripe);
      log.print("  Worker %u(%u) -> Stripe " SIZE_FORMAT "(" SIZE_FORMAT ")",
                worker_id, _nworkers, stripe_id, nstripes);
    }
  }
}

class ZMarkRootsIteratorClosure : public ZRootsIteratorClosure {
public:
  ZMarkRootsIteratorClosure() {
    ZStatTLAB::reset();
  }

  ~ZMarkRootsIteratorClosure() {
    ZStatTLAB::publish();
  }

  virtual void do_thread(Thread* thread) {
    ZRootsIteratorClosure::do_thread(thread);

    // Update thread local address bad mask
    ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask);

    // Retire TLAB
    if (UseTLAB && thread->is_Java_thread()) {
      thread->tlab().retire(ZStatTLAB::get());
      thread->tlab().resize();
    }
  }

  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_root_oop_field(p);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZMarkRootsTask : public ZTask {
private:
  ZMark* const              _mark;
  ZRootsIterator            _roots;
  ZMarkRootsIteratorClosure _cl;

public:
  ZMarkRootsTask(ZMark* mark) :
      ZTask("ZMarkRootsTask"),
      _mark(mark),
      _roots() {}

  virtual void work() {
    _roots.oops_do(&_cl);

    // Flush and free worker stacks. Needed here since
    // the set of workers executing during root scanning
    // can be different from the set of workers executing
    // during mark.
    _mark->flush_and_free();
  }
};

void ZMark::start() {
  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Prepare for concurrent mark
  prepare_mark();

  // Mark roots
  ZMarkRootsTask task(this);
  _workers->run_parallel(&task);
}

void ZMark::prepare_work() {
  assert(_nworkers == _workers->nconcurrent(), "Invalid number of workers");

  // Set number of active workers
  _terminate.reset(_nworkers);

  // Reset flush counters
  _work_nproactiveflush = _work_nterminateflush = 0;
  _work_terminateflush = true;
}

void ZMark::finish_work() {
  // Accumulate proactive/terminate flush counters
  _nproactiveflush += _work_nproactiveflush;
  _nterminateflush += _work_nterminateflush;
}

bool ZMark::is_array(uintptr_t addr) const {
  return ZOop::to_oop(addr)->is_objArray();
}

void ZMark::push_partial_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(is_aligned(addr, ZMarkPartialArrayMinSize), "Address misaligned");
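  // Note: the stack entry encodes the address as an offset in
  // ZMarkPartialArrayMinSize units (see the shift below), which is
  // why the address must be aligned to that size.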
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());
  ZMarkStripe* const stripe = _stripes.stripe_for_addr(addr);
  const uintptr_t offset = ZAddress::offset(addr) >> ZMarkPartialArrayMinSizeShift;
  const uintptr_t length = size / oopSize;
  const ZMarkStackEntry entry(offset, length, finalizable);

  log_develop_trace(gc, marking)("Array push partial: " PTR_FORMAT " (" SIZE_FORMAT "), stripe: " SIZE_FORMAT,
                                 addr, size, _stripes.stripe_id(stripe));

  stacks->push(&_allocator, &_stripes, stripe, entry, false /* publish */);
}

void ZMark::follow_small_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= ZMarkPartialArrayMinSize, "Too large, should be split");
  const size_t length = size / oopSize;

  log_develop_trace(gc, marking)("Array follow small: " PTR_FORMAT " (" SIZE_FORMAT ")", addr, size);

  ZBarrier::mark_barrier_on_oop_array((oop*)addr, length, finalizable);
}

void ZMark::follow_large_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= (size_t)arrayOopDesc::max_array_length(T_OBJECT) * oopSize, "Too large");
  assert(size > ZMarkPartialArrayMinSize, "Too small, should not be split");
  const uintptr_t start = addr;
  const uintptr_t end = start + size;

  // Calculate the aligned middle start/end/size, where the middle start
  // should always be greater than the start (hence the +1 below) to make
  // sure we always do some follow work, not just split the array into pieces.
  const uintptr_t middle_start = align_up(start + 1, ZMarkPartialArrayMinSize);
  const size_t    middle_size = align_down(end - middle_start, ZMarkPartialArrayMinSize);
  const uintptr_t middle_end = middle_start + middle_size;
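  // Worked example (illustrative, assuming ZMarkPartialArrayMinSize is 4K):
  // start = 0x1008 and size = 0x3000 give end = 0x4008, middle_start = 0x2000,
  // middle_size = 0x2000 and middle_end = 0x4000. The leading part
  // [0x1008, 0x2000) is followed directly below, while the trailing part
  // [0x4000, 0x4008) and the middle part [0x2000, 0x4000) are pushed.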

  log_develop_trace(gc, marking)("Array follow large: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT "), "
                                 "middle: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT ")",
                                 start, end, size, middle_start, middle_end, middle_size);

  // Push unaligned trailing part
  if (end > middle_end) {
    const uintptr_t trailing_addr = middle_end;
    const size_t trailing_size = end - middle_end;
    push_partial_array(trailing_addr, trailing_size, finalizable);
  }

  // Push aligned middle part(s)
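  // The loop below repeatedly halves the remaining middle range, pushing
  // the upper half each time, so the work is split into geometrically
  // shrinking pieces.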
  uintptr_t partial_addr = middle_end;
  while (partial_addr > middle_start) {
    const size_t parts = 2;
    const size_t partial_size = align_up((partial_addr - middle_start) / parts, ZMarkPartialArrayMinSize);
    partial_addr -= partial_size;
    push_partial_array(partial_addr, partial_size, finalizable);
  }

  // Follow leading part
  assert(start < middle_start, "Miscalculated middle start");
  const uintptr_t leading_addr = start;
  const size_t leading_size = middle_start - start;
  follow_small_array(leading_addr, leading_size, finalizable);
}

void ZMark::follow_array(uintptr_t addr, size_t size, bool finalizable) {
  if (size <= ZMarkPartialArrayMinSize) {
    follow_small_array(addr, size, finalizable);
  } else {
    follow_large_array(addr, size, finalizable);
  }
}

void ZMark::follow_partial_array(ZMarkStackEntry entry, bool finalizable) {
  const uintptr_t addr = ZAddress::good(entry.partial_array_offset() << ZMarkPartialArrayMinSizeShift);
  const size_t size = entry.partial_array_length() * oopSize;

  follow_array(addr, size, finalizable);
}

void ZMark::follow_array_object(objArrayOop obj, bool finalizable) {
  const uintptr_t addr = (uintptr_t)obj->base();
  const size_t size = (size_t)obj->length() * oopSize;

  follow_array(addr, size, finalizable);
}

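// Note: a finalizable mark means the object is reachable only through a
// finalizer and is kept alive for finalization, but is not strongly live.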
void ZMark::follow_object(oop obj, bool finalizable) {
  if (finalizable) {
    ZMarkBarrierOopClosure<true /* finalizable */> cl;
    obj->oop_iterate(&cl);
  } else {
    ZMarkBarrierOopClosure<false /* finalizable */> cl;
    obj->oop_iterate(&cl);
  }
}

bool ZMark::try_mark_object(ZMarkCache* cache, uintptr_t addr, bool finalizable) {
  ZPage* const page = _pagetable->get(addr);
  if (page->is_allocating()) {
    // Newly allocated objects are implicitly marked
    return false;
  }

  // Try mark object
  bool inc_live = false;
  const bool success = page->mark_object(addr, finalizable, inc_live);
  if (inc_live) {
    // Update live objects/bytes for page. We use the aligned object
    // size since that is the actual number of bytes used on the page
    // and alignment paddings can never be reclaimed.
    const size_t size = ZUtils::object_size(addr);
    const size_t aligned_size = align_up(size, page->object_alignment());
    cache->inc_live(page, aligned_size);
  }

  return success;
}

void ZMark::mark_and_follow(ZMarkCache* cache, ZMarkStackEntry entry) {
  // Decode flags
  const bool finalizable = entry.finalizable();
  const bool partial_array = entry.partial_array();

  if (partial_array) {
    follow_partial_array(entry, finalizable);
    return;
  }

  // Decode object address
  const uintptr_t addr = entry.object_address();

  if (!try_mark_object(cache, addr, finalizable)) {
    // Already marked
    return;
  }

  if (is_array(addr)) {
    follow_array_object(objArrayOop(ZOop::to_oop(addr)), finalizable);
  } else {
    follow_object(ZOop::to_oop(addr), finalizable);
  }
}

template <typename T>
bool ZMark::drain(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
  ZMarkStackEntry entry;

  // Drain stripe stacks
  while (stacks->pop(&_allocator, &_stripes, stripe, entry)) {
    mark_and_follow(cache, entry);

    // Check timeout
    if (timeout->has_expired()) {
      // Timeout
      return false;
    }
  }

  // Success
  return true;
}

template <typename T>
bool ZMark::drain_and_flush(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
  const bool success = drain(stripe, stacks, cache, timeout);

  // Flush and publish worker stacks
  stacks->flush(&_allocator, &_stripes);

  return success;
}

bool ZMark::try_steal(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  // Try to steal a stack from another stripe
  for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
       victim_stripe != stripe;
       victim_stripe = _stripes.stripe_next(victim_stripe)) {
    ZMarkStack* const stack = victim_stripe->steal_stack();
    if (stack != NULL) {
      // Success, install the stolen stack
      stacks->install(&_stripes, stripe, stack);
      return true;
    }
  }

  // Nothing to steal
  return false;
}

void ZMark::idle() const {
  ZStatTimer timer(ZSubPhaseConcurrentMarkIdle);
  os::naked_short_sleep(1);
}

class ZMarkFlushAndFreeStacksClosure : public ThreadClosure {
private:
  ZMark* const _mark;
  bool         _flushed;

public:
  ZMarkFlushAndFreeStacksClosure(ZMark* mark) :
      _mark(mark),
      _flushed(false) {}

  void do_thread(Thread* thread) {
    if (_mark->flush_and_free(thread)) {
      _flushed = true;
    }
  }

  bool flushed() const {
    return _flushed;
  }
};

bool ZMark::flush(bool at_safepoint) {
  ZMarkFlushAndFreeStacksClosure cl(this);
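  // At a safepoint we can iterate the threads directly, otherwise a
  // handshake is needed to safely visit each thread's mark stacks.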
  if (at_safepoint) {
    Threads::threads_do(&cl);
  } else {
    Handshake::execute(&cl);
  }

  // Returns true if more work is available
  return cl.flushed() || !_stripes.is_empty();
}

bool ZMark::try_flush(volatile size_t* nflush) {
  // Only flush if handshakes are enabled
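  // (handshaking without ThreadLocalHandshakes would require stopping
  // threads at a safepoint, which we want to avoid here)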
  if (!ThreadLocalHandshakes) {
    return false;
  }

  Atomic::inc(nflush);

  ZStatTimer timer(ZSubPhaseConcurrentMarkTryFlush);
  return flush(false /* at_safepoint */);
}

bool ZMark::try_proactive_flush() {
  // Only do proactive flushes from worker 0
  if (ZThread::worker_id() != 0) {
    return false;
  }

  if (Atomic::load(&_work_nproactiveflush) == ZMarkProactiveFlushMax ||
      Atomic::load(&_work_nterminateflush) != 0) {
    // Limit reached or we're trying to terminate
    return false;
  }

  return try_flush(&_work_nproactiveflush);
}

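// Termination happens in two stages. All workers must first enter stage 0,
// at which point a final round of flushing is attempted. Only when no more
// work can be flushed out do the workers proceed to stage 1, and marking
// terminates once all workers have entered stage 1. A worker drops back
// out of a stage whenever new work appears.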
bool ZMark::try_terminate() {
  ZStatTimer timer(ZSubPhaseConcurrentMarkTryTerminate);

  if (_terminate.enter_stage0()) {
    // Last thread entered stage 0, flush
    if (Atomic::load(&_work_terminateflush) &&
        Atomic::load(&_work_nterminateflush) != ZMarkTerminateFlushMax) {
      // Exit stage 0 to allow other threads to continue marking
      _terminate.exit_stage0();

      // Flush before termination
      if (!try_flush(&_work_nterminateflush)) {
        // No more work available, skip further flush attempts
        Atomic::store(false, &_work_terminateflush);
      }

      // Don't terminate, regardless of whether we successfully
      // flushed out more work or not. We've already exited
      // termination stage 0, to allow other threads to continue
      // marking, so this thread has to return false and also
      // make another round of attempted marking.
      return false;
    }
  }

  for (;;) {
    if (_terminate.enter_stage1()) {
      // Last thread entered stage 1, terminate
      return true;
    }

    // Idle to give the other threads
    // a chance to enter termination.
    idle();

    if (!_terminate.try_exit_stage1()) {
      // All workers in stage 1, terminate
      return true;
    }

    if (_terminate.try_exit_stage0()) {
      // More work available, don't terminate
      return false;
    }
  }
}

class ZMarkNoTimeout : public StackObj {
public:
  bool has_expired() {
    return false;
  }
};

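// Worker loop for normal (untimed) concurrent marking: drain the worker's
// own stripe, then try to steal from other stripes, then try to flush out
// more work, and finally try to terminate.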
void ZMark::work_without_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  ZStatTimer timer(ZSubPhaseConcurrentMark);
  ZMarkNoTimeout no_timeout;

  for (;;) {
    drain_and_flush(stripe, stacks, cache, &no_timeout);

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    if (try_proactive_flush()) {
      // Work available
      continue;
    }

    if (try_terminate()) {
      // Terminate
      break;
    }
  }
}

class ZMarkTimeout : public StackObj {
private:
  const Ticks    _start;
  const uint64_t _timeout;
  const uint64_t _check_interval;
  uint64_t       _check_at;
  uint64_t       _check_count;
  bool           _expired;

public:
  ZMarkTimeout(uint64_t timeout_in_millis) :
      _start(Ticks::now()),
      _timeout(_start.value() + TimeHelper::millis_to_counter(timeout_in_millis)),
      _check_interval(200),
      _check_at(_check_interval),
      _check_count(0),
      _expired(false) {}

  ~ZMarkTimeout() {
    const Tickspan duration = Ticks::now() - _start;
    log_debug(gc, marking)("Mark With Timeout (%s): %s, " UINT64_FORMAT " oops, %.3fms",
                           ZThread::name(), _expired ? "Expired" : "Completed",
                           _check_count, TimeHelper::counter_to_millis(duration.value()));
  }

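  // Reading the clock is comparatively expensive, so the time is only
  // checked once every _check_interval calls.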
  bool has_expired() {
    if (++_check_count == _check_at) {
      _check_at += _check_interval;
      if ((uint64_t)Ticks::now().value() >= _timeout) {
        // Timeout
        _expired = true;
      }
    }

    return _expired;
  }
};

void ZMark::work_with_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, uint64_t timeout_in_millis) {
  ZStatTimer timer(ZSubPhaseMarkTryComplete);
  ZMarkTimeout timeout(timeout_in_millis);

  for (;;) {
    if (!drain_and_flush(stripe, stacks, cache, &timeout)) {
      // Timed out
      break;
    }

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    // Terminate
    break;
  }
}

void ZMark::work(uint64_t timeout_in_millis) {
  ZMarkCache cache(_stripes.nstripes());
  ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, ZThread::worker_id());
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());

  if (timeout_in_millis == 0) {
    work_without_timeout(&cache, stripe, stacks);
  } else {
    work_with_timeout(&cache, stripe, stacks, timeout_in_millis);
  }

  // Make sure stacks have been flushed
  assert(stacks->is_empty(&_stripes), "Should be empty");

  // Free remaining stacks
  stacks->free(&_allocator);
}

class ZMarkTask : public ZTask {
private:
  ZMark* const   _mark;
  const uint64_t _timeout_in_millis;

public:
  ZMarkTask(ZMark* mark, uint64_t timeout_in_millis = 0) :
      ZTask("ZMarkTask"),
      _mark(mark),
      _timeout_in_millis(timeout_in_millis) {
    _mark->prepare_work();
  }

  ~ZMarkTask() {
    _mark->finish_work();
  }

  virtual void work() {
    _mark->work(_timeout_in_millis);
  }
};

void ZMark::mark() {
  ZMarkTask task(this);
  _workers->run_concurrent(&task);
}

bool ZMark::try_complete() {
  _ntrycomplete++;

  // Use nconcurrent number of worker threads to maintain the
  // worker/stripe distribution used during concurrent mark.
  ZMarkTask task(this, ZMarkCompleteTimeout);
  _workers->run_concurrent(&task);

  // Successful if all stripes are empty
  return _stripes.is_empty();
}

bool ZMark::try_end() {
  // Flush all mark stacks
  if (!flush(true /* at_safepoint */)) {
    // Mark completed
    return true;
  }

  // Try complete marking by doing a limited
  // amount of mark work in this phase.
  return try_complete();
}

bool ZMark::end() {
  // Try end marking
  if (!try_end()) {
    // Mark not completed
    _ncontinue++;
    return false;
  }

  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Update statistics
  ZStatMark::set_at_mark_end(_nproactiveflush, _nterminateflush, _ntrycomplete, _ncontinue);

  // Mark completed
  return true;
}

void ZMark::flush_and_free() {
  Thread* const thread = Thread::current();
  flush_and_free(thread);
}

bool ZMark::flush_and_free(Thread* thread) {
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
  const bool flushed = stacks->flush(&_allocator, &_stripes);
  stacks->free(&_allocator);
  return flushed;
}

class ZVerifyMarkStacksEmptyClosure : public ThreadClosure {
private:
  const ZMarkStripeSet* const _stripes;

public:
  ZVerifyMarkStacksEmptyClosure(const ZMarkStripeSet* stripes) :
      _stripes(stripes) {}

  void do_thread(Thread* thread) {
    ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
    guarantee(stacks->is_empty(_stripes), "Should be empty");
  }
};

void ZMark::verify_all_stacks_empty() const {
  // Verify thread stacks
  ZVerifyMarkStacksEmptyClosure cl(&_stripes);
  Threads::threads_do(&cl);

  // Verify stripe stacks
  guarantee(_stripes.is_empty(), "Should be empty");
}