/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zMarkCache.inline.hpp"
#include "gc/z/zMarkStack.inline.hpp"
#include "gc/z/zMarkTerminate.inline.hpp"
#include "gc/z/zOop.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zThreadLocalData.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handshake.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ticks.hpp"

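// Timed sub-phases of marking, reported through ZStat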
static const ZStatSubPhase ZSubPhaseConcurrentMark("Concurrent Mark");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryFlush("Concurrent Mark Try Flush");
static const ZStatSubPhase ZSubPhaseConcurrentMarkIdle("Concurrent Mark Idle");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryTerminate("Concurrent Mark Try Terminate");
static const ZStatSubPhase ZSubPhaseMarkTryComplete("Pause Mark Try Complete");

ZMark::ZMark(ZWorkers* workers, ZPageTable* pagetable) :
    _workers(workers),
    _pagetable(pagetable),
    _allocator(),
    _stripes(),
    _terminate(),
    _work_terminateflush(true),
    _work_nproactiveflush(0),
    _work_nterminateflush(0),
    _nproactiveflush(0),
    _nterminateflush(0),
    _ntrycomplete(0),
    _ncontinue(0),
    _nworkers(0) {}

bool ZMark::is_initialized() const {
  return _allocator.is_initialized();
}

size_t ZMark::calculate_nstripes(uint nworkers) const {
  // Calculate the number of stripes from the number of workers we use,
  // where the number of stripes must be a power of two and we want to
  // have at least one worker per stripe.
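  // For example, 6 workers give 4 stripes (6 rounded down to a
  // power of two), capped at ZMarkStripesMax.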
  const size_t nstripes = ZUtils::round_down_power_of_2(nworkers);
  return MIN2(nstripes, ZMarkStripesMax);
}

void ZMark::prepare_mark() {
  // Increment global sequence number to invalidate
  // marking information for all pages.
  ZGlobalSeqNum++;

  // Reset flush/continue counters
  _nproactiveflush = 0;
  _nterminateflush = 0;
  _ntrycomplete = 0;
  _ncontinue = 0;

  // Set number of workers to use
  _nworkers = _workers->nconcurrent();

  // Set number of mark stripes to use, based on number
  // of workers we will use in the concurrent mark phase.
  const size_t nstripes = calculate_nstripes(_nworkers);
  _stripes.set_nstripes(nstripes);

  // Update statistics
  ZStatMark::set_at_mark_start(nstripes);

  // Print worker/stripe distribution
  LogTarget(Debug, gc, marking) log;
  if (log.is_enabled()) {
    log.print("Mark Worker/Stripe Distribution");
    for (uint worker_id = 0; worker_id < _nworkers; worker_id++) {
      const ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, worker_id);
      const size_t stripe_id = _stripes.stripe_id(stripe);
      log.print("  Worker %u(%u) -> Stripe " SIZE_FORMAT "(" SIZE_FORMAT ")",
                worker_id, _nworkers, stripe_id, nstripes);
    }
  }
}

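// Task for marking the root set in parallel. Discovered objects are
// pushed onto the mark stacks and followed by the concurrent mark phase.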
class ZMarkRootsTask : public ZTask {
private:
  ZMark* const   _mark;
  ZRootsIterator _roots;

public:
  ZMarkRootsTask(ZMark* mark) :
      ZTask("ZMarkRootsTask"),
      _mark(mark),
      _roots(ClassUnloading) {}

  virtual void work() {
    ZMarkRootOopClosure cl;
    _roots.oops_do(&cl);

    // Flush and free worker stacks. Needed here since
    // the set of workers executing during root scanning
    // can be different from the set of workers executing
    // during mark.
    _mark->flush_and_free();
  }
};

void ZMark::start() {
  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Prepare for concurrent mark
  prepare_mark();

  // Mark roots
  ZMarkRootsTask task(this);
  _workers->run_parallel(&task);
}

void ZMark::prepare_work() {
  assert(_nworkers == _workers->nconcurrent(), "Invalid number of workers");

  // Set number of active workers
  _terminate.reset(_nworkers);

  // Reset flush counters
  _work_nproactiveflush = _work_nterminateflush = 0;
  _work_terminateflush = true;
}

void ZMark::finish_work() {
  // Accumulate proactive/terminate flush counters
  _nproactiveflush += _work_nproactiveflush;
  _nterminateflush += _work_nterminateflush;
}

bool ZMark::is_array(uintptr_t addr) const {
  return ZOop::to_oop(addr)->is_objArray();
}

void ZMark::push_partial_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(is_aligned(addr, ZMarkPartialArrayMinSize), "Address misaligned");
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());
  ZMarkStripe* const stripe = _stripes.stripe_for_addr(addr);
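  // Encode the chunk as a mark stack entry, with the offset expressed
  // in ZMarkPartialArrayMinSize units and the length in oops.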
  const uintptr_t offset = ZAddress::offset(addr) >> ZMarkPartialArrayMinSizeShift;
  const uintptr_t length = size / oopSize;
  const ZMarkStackEntry entry(offset, length, finalizable);

  log_develop_trace(gc, marking)("Array push partial: " PTR_FORMAT " (" SIZE_FORMAT "), stripe: " SIZE_FORMAT,
                                 addr, size, _stripes.stripe_id(stripe));

  stacks->push(&_allocator, &_stripes, stripe, entry, false /* publish */);
}

void ZMark::follow_small_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= ZMarkPartialArrayMinSize, "Too large, should be split");
  const size_t length = size / oopSize;

  log_develop_trace(gc, marking)("Array follow small: " PTR_FORMAT " (" SIZE_FORMAT ")", addr, size);

  ZBarrier::mark_barrier_on_oop_array((oop*)addr, length, finalizable);
}

void ZMark::follow_large_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= (size_t)arrayOopDesc::max_array_length(T_OBJECT) * oopSize, "Too large");
  assert(size > ZMarkPartialArrayMinSize, "Too small, should not be split");
  const uintptr_t start = addr;
  const uintptr_t end = start + size;

  // Calculate the aligned middle start/end/size, where the middle start
  // should always be greater than the start (hence the +1 below) to make
  // sure we always do some follow work, not just split the array into pieces.
  const uintptr_t middle_start = align_up(start + 1, ZMarkPartialArrayMinSize);
  const size_t    middle_size = align_down(end - middle_start, ZMarkPartialArrayMinSize);
  const uintptr_t middle_end = middle_start + middle_size;

  log_develop_trace(gc, marking)("Array follow large: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT "), "
                                 "middle: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT ")",
                                 start, end, size, middle_start, middle_end, middle_size);

  // Push unaligned trailing part
  if (end > middle_end) {
    const uintptr_t trailing_addr = middle_end;
    const size_t trailing_size = end - middle_end;
    push_partial_array(trailing_addr, trailing_size, finalizable);
  }

  // Push aligned middle part(s)
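  // Repeatedly split off the (aligned) upper half of the remaining
  // middle part and push it as a partial array chunk.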
  uintptr_t partial_addr = middle_end;
  while (partial_addr > middle_start) {
    const size_t parts = 2;
    const size_t partial_size = align_up((partial_addr - middle_start) / parts, ZMarkPartialArrayMinSize);
    partial_addr -= partial_size;
    push_partial_array(partial_addr, partial_size, finalizable);
  }

  // Follow leading part
  assert(start < middle_start, "Miscalculated middle start");
  const uintptr_t leading_addr = start;
  const size_t leading_size = middle_start - start;
  follow_small_array(leading_addr, leading_size, finalizable);
}

void ZMark::follow_array(uintptr_t addr, size_t size, bool finalizable) {
  if (size <= ZMarkPartialArrayMinSize) {
    follow_small_array(addr, size, finalizable);
  } else {
    follow_large_array(addr, size, finalizable);
  }
}

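// Decode a partial array entry back into an address and size, and
// continue following (and possibly re-splitting) that chunk.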
void ZMark::follow_partial_array(ZMarkStackEntry entry, bool finalizable) {
  const uintptr_t addr = ZAddress::good(entry.partial_array_offset() << ZMarkPartialArrayMinSizeShift);
  const size_t size = entry.partial_array_length() * oopSize;

  follow_array(addr, size, finalizable);
}

void ZMark::follow_klass(Klass* klass, bool finalizable) {
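  // Claim the ClassLoaderData so it is only processed once, then
  // keep the class loader alive by marking its holder object.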
  ClassLoaderData* cld = klass->class_loader_data();
  if (!cld->claim(finalizable)) {
    return;
  }

  oop holder = cld->holder_no_keepalive();
  if (holder == NULL) {
    return;
  }

  if (finalizable) {
    ZHeap::heap()->mark_object<true, false>(ZOop::to_address(holder));
  } else {
    ZHeap::heap()->mark_object<false, false>(ZOop::to_address(holder));
  }
}

void ZMark::follow_array_object(objArrayOop obj, bool finalizable) {
  if (ClassUnloading) {
    follow_klass(obj->klass(), finalizable);
  }

  const uintptr_t addr = (uintptr_t)obj->base();
  const size_t size = (size_t)obj->length() * oopSize;

  follow_array(addr, size, finalizable);
}

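// Follow all oop fields of an object. The finalizable flag selects
// which flavor of the mark barrier closure is used when visiting
// the fields.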
void ZMark::follow_object(oop obj, bool finalizable) {
  if (finalizable) {
    ZMarkBarrierOopClosure<true /* finalizable */> cl;
    obj->oop_iterate(&cl);
  } else {
    ZMarkBarrierOopClosure<false /* finalizable */> cl;
    obj->oop_iterate(&cl);
  }
}

bool ZMark::try_mark_object(ZMarkCache* cache, uintptr_t addr, bool finalizable) {
  ZPage* const page = _pagetable->get(addr);
  if (page->is_allocating()) {
    // Newly allocated objects are implicitly marked
    return false;
  }

  // Try mark object
  bool inc_live = false;
  const bool success = page->mark_object(addr, finalizable, inc_live);
  if (inc_live) {
    // Update live objects/bytes for page. We use the aligned object
    // size since that is the actual number of bytes used on the page
    // and alignment paddings can never be reclaimed.
    const size_t size = ZUtils::object_size(addr);
    const size_t aligned_size = align_up(size, page->object_alignment());
    cache->inc_live(page, aligned_size);
  }

  return success;
}

void ZMark::mark_and_follow(ZMarkCache* cache, ZMarkStackEntry entry) {
  // Decode flags
  const bool finalizable = entry.finalizable();
  const bool partial_array = entry.partial_array();

  if (partial_array) {
    follow_partial_array(entry, finalizable);
    return;
  }

  // Decode object address
  const uintptr_t addr = entry.object_address();

  if (!try_mark_object(cache, addr, finalizable)) {
    // Already marked
    return;
  }

  if (is_array(addr)) {
    follow_array_object(objArrayOop(ZOop::to_oop(addr)), finalizable);
  } else {
    follow_object(ZOop::to_oop(addr), finalizable);
  }
}

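// Drain entries from the given stripe and the thread-local stacks,
// following each entry, until both are empty or the timeout expires.
// Returns false if the timeout expired before the stacks were drained.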
template <typename T>
bool ZMark::drain(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
  ZMarkStackEntry entry;

  // Drain stripe stacks
  while (stacks->pop(&_allocator, &_stripes, stripe, entry)) {
    mark_and_follow(cache, entry);

    // Check timeout
    if (timeout->has_expired()) {
      // Timeout
      return false;
    }
  }

  // Success
  return true;
}

template <typename T>
bool ZMark::drain_and_flush(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
  const bool success = drain(stripe, stacks, cache, timeout);

  // Flush and publish worker stacks
  stacks->flush(&_allocator, &_stripes);

  return success;
}

bool ZMark::try_steal(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  // Try to steal a stack from another stripe
  for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
       victim_stripe != stripe;
       victim_stripe = _stripes.stripe_next(victim_stripe)) {
    ZMarkStack* const stack = victim_stripe->steal_stack();
    if (stack != NULL) {
      // Success, install the stolen stack
      stacks->install(&_stripes, stripe, stack);
      return true;
    }
  }

  // Nothing to steal
  return false;
}

void ZMark::idle() const {
  ZStatTimer timer(ZSubPhaseConcurrentMarkIdle);
  os::naked_short_sleep(1);
}

class ZMarkFlushAndFreeStacksClosure : public ThreadClosure {
private:
  ZMark* const _mark;
  bool         _flushed;

public:
  ZMarkFlushAndFreeStacksClosure(ZMark* mark) :
      _mark(mark),
      _flushed(false) {}

  void do_thread(Thread* thread) {
    if (_mark->flush_and_free(thread)) {
      _flushed = true;
    }
  }

  bool flushed() const {
    return _flushed;
  }
};

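// Flush the mark stacks of all threads, either directly when already
// inside a safepoint, or via a handshake with each thread. Returns
// true if more marking work became available.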
bool ZMark::flush(bool at_safepoint) {
  ZMarkFlushAndFreeStacksClosure cl(this);
  if (at_safepoint) {
    Threads::threads_do(&cl);
  } else {
    Handshake::execute(&cl);
  }

  // Returns true if more work is available
  return cl.flushed() || !_stripes.is_empty();
}

bool ZMark::try_flush(volatile size_t* nflush) {
  // Only flush if handshakes are enabled
  if (!ThreadLocalHandshakes) {
    return false;
  }

  Atomic::inc(nflush);

  ZStatTimer timer(ZSubPhaseConcurrentMarkTryFlush);
  return flush(false /* at_safepoint */);
}

bool ZMark::try_proactive_flush() {
  // Only do proactive flushes from worker 0
  if (ZThread::worker_id() != 0) {
    return false;
  }

  if (Atomic::load(&_work_nproactiveflush) == ZMarkProactiveFlushMax ||
      Atomic::load(&_work_nterminateflush) != 0) {
    // Limit reached or we're trying to terminate
    return false;
  }

  return try_flush(&_work_nproactiveflush);
}

bool ZMark::try_terminate() {
  ZStatTimer timer(ZSubPhaseConcurrentMarkTryTerminate);

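  // Termination runs in two stages. A worker enters stage 0 when it
  // runs out of work; the last worker to enter tries one more flush.
  // Stage 1 is entered once no more work can be flushed out, and
  // marking terminates when all workers have entered stage 1.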
  if (_terminate.enter_stage0()) {
    // Last thread entered stage 0, flush
    if (Atomic::load(&_work_terminateflush) &&
        Atomic::load(&_work_nterminateflush) != ZMarkTerminateFlushMax) {
      // Exit stage 0 to allow other threads to continue marking
      _terminate.exit_stage0();

      // Flush before termination
      if (!try_flush(&_work_nterminateflush)) {
        // No more work available, skip further flush attempts
        Atomic::store(false, &_work_terminateflush);
      }

      // Don't terminate, regardless of whether we successfully
      // flushed out more work or not. We've already exited
      // termination stage 0, to allow other threads to continue
      // marking, so this thread has to return false and also
      // make another round of attempted marking.
      return false;
    }
  }

  for (;;) {
    if (_terminate.enter_stage1()) {
      // Last thread entered stage 1, terminate
      return true;
    }

    // Idle to give the other threads
    // a chance to enter termination.
    idle();

    if (!_terminate.try_exit_stage1()) {
      // All workers in stage 1, terminate
      return true;
    }

    if (_terminate.try_exit_stage0()) {
      // More work available, don't terminate
      return false;
    }
  }
}

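// Timeout policy that never expires, used when marking should run
// until termination.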
class ZMarkNoTimeout : public StackObj {
public:
  bool has_expired() {
    return false;
  }
};

void ZMark::work_without_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  ZStatTimer timer(ZSubPhaseConcurrentMark);
  ZMarkNoTimeout no_timeout;

  for (;;) {
    drain_and_flush(stripe, stacks, cache, &no_timeout);

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    if (try_proactive_flush()) {
      // Work available
      continue;
    }

    if (try_terminate()) {
      // Terminate
      break;
    }
  }
}

class ZMarkTimeout : public StackObj {
private:
  const Ticks    _start;
  const uint64_t _timeout;
  const uint64_t _check_interval;
  uint64_t       _check_at;
  uint64_t       _check_count;
  bool           _expired;

public:
  ZMarkTimeout(uint64_t timeout_in_millis) :
      _start(Ticks::now()),
      _timeout(_start.value() + TimeHelper::millis_to_counter(timeout_in_millis)),
      _check_interval(200),
      _check_at(_check_interval),
      _check_count(0),
      _expired(false) {}

  ~ZMarkTimeout() {
    const Tickspan duration = Ticks::now() - _start;
    log_debug(gc, marking)("Mark With Timeout (%s): %s, " UINT64_FORMAT " oops, %.3fms",
                           ZThread::name(), _expired ? "Expired" : "Completed",
                           _check_count, TimeHelper::counter_to_millis(duration.value()));
  }

  bool has_expired() {
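    // Only read the clock every _check_interval calls, to keep
    // the cost of the timeout check low.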
    if (++_check_count == _check_at) {
      _check_at += _check_interval;
      if ((uint64_t)Ticks::now().value() >= _timeout) {
        // Timeout
        _expired = true;
      }
    }

    return _expired;
  }
};

void ZMark::work_with_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, uint64_t timeout_in_millis) {
  ZStatTimer timer(ZSubPhaseMarkTryComplete);
  ZMarkTimeout timeout(timeout_in_millis);

  for (;;) {
    if (!drain_and_flush(stripe, stacks, cache, &timeout)) {
      // Timed out
      break;
    }

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    // Terminate
    break;
  }
}

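// Entry point for a marking worker. Each worker drains its assigned
// stripe, steals from other stripes when its own is empty, and
// participates in flushing and termination.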
void ZMark::work(uint64_t timeout_in_millis) {
  ZMarkCache cache(_stripes.nstripes());
  ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, ZThread::worker_id());
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());

  if (timeout_in_millis == 0) {
    work_without_timeout(&cache, stripe, stacks);
  } else {
    work_with_timeout(&cache, stripe, stacks, timeout_in_millis);
  }

  // Make sure stacks have been flushed
  assert(stacks->is_empty(&_stripes), "Should be empty");

  // Free remaining stacks
  stacks->free(&_allocator);
}

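// Task executing the concurrent mark loop. prepare_work() runs in the
// constructor and finish_work() in the destructor, so that they run
// exactly once, before and after the worker threads execute.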
class ZMarkTask : public ZTask {
private:
  ZMark* const   _mark;
  const uint64_t _timeout_in_millis;

public:
  ZMarkTask(ZMark* mark, uint64_t timeout_in_millis = 0) :
      ZTask("ZMarkTask"),
      _mark(mark),
      _timeout_in_millis(timeout_in_millis) {
    _mark->prepare_work();
  }

  ~ZMarkTask() {
    _mark->finish_work();
  }

  virtual void work() {
    _mark->work(_timeout_in_millis);
  }
};

void ZMark::mark() {
  ZMarkTask task(this);
  _workers->run_concurrent(&task);
}

bool ZMark::try_complete() {
  _ntrycomplete++;

  // Use nconcurrent number of worker threads to maintain the
  // worker/stripe distribution used during concurrent mark.
  ZMarkTask task(this, ZMarkCompleteTimeout);
  _workers->run_concurrent(&task);

  // Successful if all stripes are empty
  return _stripes.is_empty();
}

bool ZMark::try_end() {
  // Flush all mark stacks
  if (!flush(true /* at_safepoint */)) {
    // Mark completed
    return true;
  }

  // Try complete marking by doing a limited
  // amount of mark work in this phase.
  return try_complete();
}

bool ZMark::end() {
  // Try end marking
  if (!try_end()) {
    // Mark not completed
    _ncontinue++;
    return false;
  }

  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Update statistics
  ZStatMark::set_at_mark_end(_nproactiveflush, _nterminateflush, _ntrycomplete, _ncontinue);

  // Mark completed
  return true;
}

void ZMark::flush_and_free() {
  Thread* const thread = Thread::current();
  flush_and_free(thread);
}

bool ZMark::flush_and_free(Thread* thread) {
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
  const bool flushed = stacks->flush(&_allocator, &_stripes);
  stacks->free(&_allocator);
  return flushed;
}

class ZVerifyMarkStacksEmptyClosure : public ThreadClosure {
private:
  const ZMarkStripeSet* const _stripes;

public:
  ZVerifyMarkStacksEmptyClosure(const ZMarkStripeSet* stripes) :
      _stripes(stripes) {}

  void do_thread(Thread* thread) {
    ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
    guarantee(stacks->is_empty(_stripes), "Should be empty");
  }
};

void ZMark::verify_all_stacks_empty() const {
  // Verify thread stacks
  ZVerifyMarkStacksEmptyClosure cl(&_stripes);
  Threads::threads_do(&cl);

  // Verify stripe stacks
  guarantee(_stripes.is_empty(), "Should be empty");
}