/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zMarkCache.inline.hpp"
#include "gc/z/zMarkStack.inline.hpp"
#include "gc/z/zMarkTerminate.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handshake.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ticks.hpp"

static const ZStatSubPhase ZSubPhaseConcurrentMark("Concurrent Mark");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryFlush("Concurrent Mark Try Flush");
static const ZStatSubPhase ZSubPhaseConcurrentMarkIdle("Concurrent Mark Idle");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryTerminate("Concurrent Mark Try Terminate");
static const ZStatSubPhase ZSubPhaseMarkTryComplete("Pause Mark Try Complete");

ZMark::ZMark(ZWorkers* workers, ZPageTable* pagetable) :
    _workers(workers),
    _pagetable(pagetable),
    _allocator(),
    _stripes(),
    _terminate(),
    _work_terminateflush(true),
    _work_nproactiveflush(0),
    _work_nterminateflush(0),
    _nproactiveflush(0),
    _nterminateflush(0),
    _ntrycomplete(0),
    _ncontinue(0),
    _nworkers(0) {}

bool ZMark::is_initialized() const {
  return _allocator.is_initialized();
}

size_t ZMark::calculate_nstripes(uint nworkers) const {
  // Calculate the number of stripes from the number of workers we use,
  // where the number of stripes must be a power of two and we want to
  // have at least one worker per stripe.
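  // For example (an illustrative configuration, not taken from a real
  // setup), nworkers == 5 gives nstripes == 4, the largest power of two
  // not greater than 5, subject to the ZMarkStripesMax cap below.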
  const size_t nstripes = ZUtils::round_down_power_of_2(nworkers);
  return MIN2(nstripes, ZMarkStripesMax);
}

void ZMark::prepare_mark() {
  // Increment global sequence number to invalidate
  // marking information for all pages.
  ZGlobalSeqNum++;

  // Reset flush/continue counters
  _nproactiveflush = 0;
  _nterminateflush = 0;
  _ntrycomplete = 0;
  _ncontinue = 0;

  // Set number of workers to use
  _nworkers = _workers->nconcurrent();

  // Set number of mark stripes to use, based on number
  // of workers we will use in the concurrent mark phase.
  const size_t nstripes = calculate_nstripes(_nworkers);
  _stripes.set_nstripes(nstripes);

  // Update statistics
  ZStatMark::set_at_mark_start(nstripes);

  // Print worker/stripe distribution
  LogTarget(Debug, gc, marking) log;
  if (log.is_enabled()) {
    log.print("Mark Worker/Stripe Distribution");
    for (uint worker_id = 0; worker_id < _nworkers; worker_id++) {
      const ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, worker_id);
      const size_t stripe_id = _stripes.stripe_id(stripe);
      log.print("  Worker %u(%u) -> Stripe " SIZE_FORMAT "(" SIZE_FORMAT ")",
                worker_id, _nworkers, stripe_id, nstripes);
    }
  }
}

class ZMarkRootsTask : public ZTask {
private:
  ZMark* const   _mark;
  ZRootsIterator _roots;

public:
  ZMarkRootsTask(ZMark* mark) :
      ZTask("ZMarkRootsTask"),
      _mark(mark),
      _roots() {}

  virtual void work() {
    ZMarkRootOopClosure cl;
    _roots.oops_do(&cl);

    // Flush and free worker stacks. Needed here since
    // the set of workers executing during root scanning
    // can be different from the set of workers executing
    // during mark.
    _mark->flush_and_free();
  }
};

class ZMarkConcurrentRootsTask : public ZTask {
private:
  ZMark* const             _mark;
  ZConcurrentRootsIterator _roots;

public:
  ZMarkConcurrentRootsTask(ZMark* mark) :
      ZTask("ZMarkConcurrentRootsTask"),
      _mark(mark),
      _roots() {}

  virtual void work() {
    ZMarkBarrierOopClosure</* finalizable */ false> cl;
    _roots.oops_do(&cl);
  }
};

void ZMark::start() {
  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Prepare for concurrent mark
  prepare_mark();

  // Mark roots
  ZMarkRootsTask task(this);
  _workers->run_parallel(&task);
}

void ZMark::mark_concurrent_roots() {
  ZMarkConcurrentRootsTask task(this);
  _workers->run_concurrent(&task);
}

void ZMark::prepare_work() {
  assert(_nworkers == _workers->nconcurrent(), "Invalid number of workers");

  // Set number of active workers
  _terminate.reset(_nworkers);

  // Reset flush counters
  _work_nproactiveflush = _work_nterminateflush = 0;
  _work_terminateflush = true;
}

void ZMark::finish_work() {
  // Accumulate proactive/terminate flush counters
  _nproactiveflush += _work_nproactiveflush;
  _nterminateflush += _work_nterminateflush;
}

bool ZMark::is_array(uintptr_t addr) const {
  return ZOop::to_oop(addr)->is_objArray();
}

void ZMark::push_partial_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(is_aligned(addr, ZMarkPartialArrayMinSize), "Address misaligned");
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());
  ZMarkStripe* const stripe = _stripes.stripe_for_addr(addr);
  const uintptr_t offset = ZAddress::offset(addr) >> ZMarkPartialArrayMinSizeShift;
  const uintptr_t length = size / oopSize;
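  // Note: the entry encodes the segment as an offset in units of
  // ZMarkPartialArrayMinSize plus a length in oops, which is how
  // follow_partial_array() reconstructs the address and size later.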
  const ZMarkStackEntry entry(offset, length, finalizable);

  log_develop_trace(gc, marking)("Array push partial: " PTR_FORMAT " (" SIZE_FORMAT "), stripe: " SIZE_FORMAT,
                                 addr, size, _stripes.stripe_id(stripe));

  stacks->push(&_allocator, &_stripes, stripe, entry, false /* publish */);
}

void ZMark::follow_small_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= ZMarkPartialArrayMinSize, "Too large, should be split");
  const size_t length = size / oopSize;

  log_develop_trace(gc, marking)("Array follow small: " PTR_FORMAT " (" SIZE_FORMAT ")", addr, size);

  ZBarrier::mark_barrier_on_oop_array((oop*)addr, length, finalizable);
}

void ZMark::follow_large_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= (size_t)arrayOopDesc::max_array_length(T_OBJECT) * oopSize, "Too large");
  assert(size > ZMarkPartialArrayMinSize, "Too small, should not be split");
  const uintptr_t start = addr;
  const uintptr_t end = start + size;

  // Calculate the aligned middle start/end/size, where the middle start
  // should always be greater than the start (hence the +1 below) to make
  // sure we always do some follow work, not just split the array into pieces.
  const uintptr_t middle_start = align_up(start + 1, ZMarkPartialArrayMinSize);
  const size_t    middle_size = align_down(end - middle_start, ZMarkPartialArrayMinSize);
  const uintptr_t middle_end = middle_start + middle_size;
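  // Worked example, assuming ZMarkPartialArrayMinSize is 4K (the numbers
  // are illustrative only):
  //   start = 0x1008, size = 20K  ->  end = 0x6008
  //   middle_start = 0x2000, middle_size = 16K, middle_end = 0x6000
  //   leading part  [0x1008, 0x2000) is followed directly below,
  //   middle parts  [0x2000, 0x6000) are pushed as aligned partial arrays,
  //   trailing part [0x6000, 0x6008) is pushed as an unaligned partial array.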

  log_develop_trace(gc, marking)("Array follow large: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT "), "
                                 "middle: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT ")",
                                 start, end, size, middle_start, middle_end, middle_size);

  // Push unaligned trailing part
  if (end > middle_end) {
    const uintptr_t trailing_addr = middle_end;
    const size_t trailing_size = end - middle_end;
    push_partial_array(trailing_addr, trailing_size, finalizable);
  }

  // Push aligned middle part(s)
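  // Each iteration splits off roughly the upper half of the remaining
  // middle range and pushes it as a partial array. Since a partial array
  // is pushed to the stripe of its own address, this spreads the middle
  // of a large array over the stripes, where other workers can pick it up.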
  uintptr_t partial_addr = middle_end;
  while (partial_addr > middle_start) {
    const size_t parts = 2;
    const size_t partial_size = align_up((partial_addr - middle_start) / parts, ZMarkPartialArrayMinSize);
    partial_addr -= partial_size;
    push_partial_array(partial_addr, partial_size, finalizable);
  }

  // Follow leading part
  assert(start < middle_start, "Miscalculated middle start");
  const uintptr_t leading_addr = start;
  const size_t leading_size = middle_start - start;
  follow_small_array(leading_addr, leading_size, finalizable);
}

void ZMark::follow_array(uintptr_t addr, size_t size, bool finalizable) {
  if (size <= ZMarkPartialArrayMinSize) {
    follow_small_array(addr, size, finalizable);
  } else {
    follow_large_array(addr, size, finalizable);
  }
}

void ZMark::follow_partial_array(ZMarkStackEntry entry, bool finalizable) {
  const uintptr_t addr = ZAddress::good(entry.partial_array_offset() << ZMarkPartialArrayMinSizeShift);
  const size_t size = entry.partial_array_length() * oopSize;

  follow_array(addr, size, finalizable);
}

void ZMark::follow_array_object(objArrayOop obj, bool finalizable) {
  const uintptr_t addr = (uintptr_t)obj->base();
  const size_t size = (size_t)obj->length() * oopSize;

  follow_array(addr, size, finalizable);
}

void ZMark::follow_object(oop obj, bool finalizable) {
  if (finalizable) {
    ZMarkBarrierOopClosure<true /* finalizable */> cl;
    obj->oop_iterate(&cl);
  } else {
    ZMarkBarrierOopClosure<false /* finalizable */> cl;
    obj->oop_iterate(&cl);
  }
}

bool ZMark::try_mark_object(ZMarkCache* cache, uintptr_t addr, bool finalizable) {
  ZPage* const page = _pagetable->get(addr);
  if (page->is_allocating()) {
    // Newly allocated objects are implicitly marked
    return false;
  }

  // Try mark object
  bool inc_live = false;
  const bool success = page->mark_object(addr, finalizable, inc_live);
  if (inc_live) {
    // Update live objects/bytes for page. We use the aligned object
    // size since that is the actual number of bytes used on the page
    // and alignment paddings can never be reclaimed.
    const size_t size = ZUtils::object_size(addr);
    const size_t aligned_size = align_up(size, page->object_alignment());
    cache->inc_live(page, aligned_size);
  }

  return success;
}

void ZMark::mark_and_follow(ZMarkCache* cache, ZMarkStackEntry entry) {
  // Decode flags
  const bool finalizable = entry.finalizable();
  const bool partial_array = entry.partial_array();

  if (partial_array) {
    follow_partial_array(entry, finalizable);
    return;
  }

  // Decode object address
  const uintptr_t addr = entry.object_address();

  if (!try_mark_object(cache, addr, finalizable)) {
    // Already marked
    return;
  }

  if (is_array(addr)) {
    follow_array_object(objArrayOop(ZOop::to_oop(addr)), finalizable);
  } else {
    follow_object(ZOop::to_oop(addr), finalizable);
  }
}

template <typename T>
bool ZMark::drain(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
  ZMarkStackEntry entry;

  // Drain stripe stacks
  while (stacks->pop(&_allocator, &_stripes, stripe, entry)) {
    mark_and_follow(cache, entry);

    // Check timeout
    if (timeout->has_expired()) {
      // Timeout
      return false;
    }
  }

  // Success
  return true;
}

template <typename T>
bool ZMark::drain_and_flush(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
  const bool success = drain(stripe, stacks, cache, timeout);

  // Flush and publish worker stacks
  stacks->flush(&_allocator, &_stripes);

  return success;
}

bool ZMark::try_steal(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  // Try to steal a stack from another stripe
  for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
       victim_stripe != stripe;
       victim_stripe = _stripes.stripe_next(victim_stripe)) {
    ZMarkStack* const stack = victim_stripe->steal_stack();
    if (stack != NULL) {
      // Success, install the stolen stack
      stacks->install(&_stripes, stripe, stack);
      return true;
    }
  }

  // Nothing to steal
  return false;
}

void ZMark::idle() const {
  ZStatTimer timer(ZSubPhaseConcurrentMarkIdle);
  os::naked_short_sleep(1);
}

class ZMarkFlushAndFreeStacksClosure : public ThreadClosure {
private:
  ZMark* const _mark;
  bool         _flushed;

public:
  ZMarkFlushAndFreeStacksClosure(ZMark* mark) :
      _mark(mark),
      _flushed(false) {}

  void do_thread(Thread* thread) {
    if (_mark->flush_and_free(thread)) {
      _flushed = true;
    }
  }

  bool flushed() const {
    return _flushed;
  }
};

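// Flush each thread's local mark stacks out to the shared stripes. At a
// safepoint all threads are stopped and can be walked directly; otherwise
// a handshake is used, so that each thread's stacks are flushed while the
// thread is in a handshake-safe state.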
bool ZMark::flush(bool at_safepoint) {
  ZMarkFlushAndFreeStacksClosure cl(this);
  if (at_safepoint) {
    Threads::threads_do(&cl);
  } else {
    Handshake::execute(&cl);
  }

  // Returns true if more work is available
  return cl.flushed() || !_stripes.is_empty();
}

bool ZMark::try_flush(volatile size_t* nflush) {
  // Only flush if handshakes are enabled
  if (!ThreadLocalHandshakes) {
    return false;
  }

  Atomic::inc(nflush);

  ZStatTimer timer(ZSubPhaseConcurrentMarkTryFlush);
  return flush(false /* at_safepoint */);
}

bool ZMark::try_proactive_flush() {
  // Only do proactive flushes from worker 0
  if (ZThread::worker_id() != 0) {
    return false;
  }

  if (Atomic::load(&_work_nproactiveflush) == ZMarkProactiveFlushMax ||
      Atomic::load(&_work_nterminateflush) != 0) {
    // Limit reached or we're trying to terminate
    return false;
  }

  return try_flush(&_work_nproactiveflush);
}

bool ZMark::try_terminate() {
  ZStatTimer timer(ZSubPhaseConcurrentMarkTryTerminate);

  if (_terminate.enter_stage0()) {
    // Last thread entered stage 0, flush
    if (Atomic::load(&_work_terminateflush) &&
        Atomic::load(&_work_nterminateflush) != ZMarkTerminateFlushMax) {
      // Exit stage 0 to allow other threads to continue marking
      _terminate.exit_stage0();

      // Flush before termination
      if (!try_flush(&_work_nterminateflush)) {
        // No more work available, skip further flush attempts
        Atomic::store(false, &_work_terminateflush);
      }

      // Don't terminate, regardless of whether we successfully
      // flushed out more work or not. We've already exited
      // termination stage 0, to allow other threads to continue
      // marking, so this thread has to return false and also
      // make another round of attempted marking.
      return false;
    }
  }

  for (;;) {
    if (_terminate.enter_stage1()) {
      // Last thread entered stage 1, terminate
      return true;
    }

    // Idle to give the other threads
    // a chance to enter termination.
    idle();

    if (!_terminate.try_exit_stage1()) {
      // All workers in stage 1, terminate
      return true;
    }

    if (_terminate.try_exit_stage0()) {
      // More work available, don't terminate
      return false;
    }
  }
}

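// Timeout policy used during normal concurrent mark. It never expires,
// so drain() keeps going until the stripes and local stacks are empty.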
class ZMarkNoTimeout : public StackObj {
public:
  bool has_expired() {
    return false;
  }
};

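// Drive marking until global termination: repeatedly drain this worker's
// stripe, then try to steal work from other stripes, then try to flush
// out more work from thread-local stacks, and finally try to terminate.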
void ZMark::work_without_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  ZStatTimer timer(ZSubPhaseConcurrentMark);
  ZMarkNoTimeout no_timeout;

  for (;;) {
    drain_and_flush(stripe, stacks, cache, &no_timeout);

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    if (try_proactive_flush()) {
      // Work available
      continue;
    }

    if (try_terminate()) {
      // Terminate
      break;
    }
  }
}

class ZMarkTimeout : public StackObj {
private:
  const Ticks    _start;
  const uint64_t _timeout;
  const uint64_t _check_interval;
  uint64_t       _check_at;
  uint64_t       _check_count;
  bool           _expired;

public:
  ZMarkTimeout(uint64_t timeout_in_millis) :
      _start(Ticks::now()),
      _timeout(_start.value() + TimeHelper::millis_to_counter(timeout_in_millis)),
      _check_interval(200),
      _check_at(_check_interval),
      _check_count(0),
      _expired(false) {}

  ~ZMarkTimeout() {
    const Tickspan duration = Ticks::now() - _start;
    log_debug(gc, marking)("Mark With Timeout (%s): %s, " UINT64_FORMAT " oops, %.3fms",
                           ZThread::name(), _expired ? "Expired" : "Completed",
                           _check_count, TimeHelper::counter_to_millis(duration.value()));
  }

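  // Sample the clock only every _check_interval calls, to avoid paying
  // for a time lookup on every processed mark stack entry.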
  bool has_expired() {
    if (++_check_count == _check_at) {
      _check_at += _check_interval;
      if ((uint64_t)Ticks::now().value() >= _timeout) {
        // Timeout
        _expired = true;
      }
    }

    return _expired;
  }
};

void ZMark::work_with_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, uint64_t timeout_in_millis) {
  ZStatTimer timer(ZSubPhaseMarkTryComplete);
  ZMarkTimeout timeout(timeout_in_millis);

  for (;;) {
    if (!drain_and_flush(stripe, stacks, cache, &timeout)) {
      // Timed out
      break;
    }

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    // Terminate
    break;
  }
}

void ZMark::work(uint64_t timeout_in_millis) {
  ZMarkCache cache(_stripes.nstripes());
  ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, ZThread::worker_id());
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());

  if (timeout_in_millis == 0) {
    work_without_timeout(&cache, stripe, stacks);
  } else {
    work_with_timeout(&cache, stripe, stacks, timeout_in_millis);
  }

  // Make sure stacks have been flushed
  assert(stacks->is_empty(&_stripes), "Should be empty");

  // Free remaining stacks
  stacks->free(&_allocator);
}

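// Marking task. prepare_work() runs in the constructor and finish_work()
// in the destructor, i.e. once in the thread that owns the task, while
// work() is executed by all workers running the task.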
class ZMarkTask : public ZTask {
private:
  ZMark* const   _mark;
  const uint64_t _timeout_in_millis;

public:
  ZMarkTask(ZMark* mark, uint64_t timeout_in_millis = 0) :
      ZTask("ZMarkTask"),
      _mark(mark),
      _timeout_in_millis(timeout_in_millis) {
    _mark->prepare_work();
  }

  ~ZMarkTask() {
    _mark->finish_work();
  }

  virtual void work() {
    _mark->work(_timeout_in_millis);
  }
};

void ZMark::mark() {
  ZMarkTask task(this);
  _workers->run_concurrent(&task);
}

bool ZMark::try_complete() {
  _ntrycomplete++;

  // Use nconcurrent number of worker threads to maintain the
  // worker/stripe distribution used during concurrent mark.
  ZMarkTask task(this, ZMarkCompleteTimeout);
  _workers->run_concurrent(&task);

  // Successful if all stripes are empty
  return _stripes.is_empty();
}

bool ZMark::try_end() {
  // Flush all mark stacks
  if (!flush(true /* at_safepoint */)) {
    // Mark completed
    return true;
  }

  // Try complete marking by doing a limited
  // amount of mark work in this phase.
  return try_complete();
}

bool ZMark::end() {
  // Try end marking
  if (!try_end()) {
    // Mark not completed
    _ncontinue++;
    return false;
  }

  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Update statistics
  ZStatMark::set_at_mark_end(_nproactiveflush, _nterminateflush, _ntrycomplete, _ncontinue);

  // Mark completed
  return true;
}

void ZMark::flush_and_free() {
  Thread* const thread = Thread::current();
  flush_and_free(thread);
}

bool ZMark::flush_and_free(Thread* thread) {
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
  const bool flushed = stacks->flush(&_allocator, &_stripes);
  stacks->free(&_allocator);
  return flushed;
}

class ZVerifyMarkStacksEmptyClosure : public ThreadClosure {
private:
  const ZMarkStripeSet* const _stripes;

public:
  ZVerifyMarkStacksEmptyClosure(const ZMarkStripeSet* stripes) :
      _stripes(stripes) {}

  void do_thread(Thread* thread) {
    ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
    guarantee(stacks->is_empty(_stripes), "Should be empty");
  }
};

void ZMark::verify_all_stacks_empty() const {
  // Verify thread stacks
  ZVerifyMarkStacksEmptyClosure cl(&_stripes);
  Threads::threads_do(&cl);

  // Verify stripe stacks
  guarantee(_stripes.is_empty(), "Should be empty");
}