// concurrent root iterator v2
0 /*
1  * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
2  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
3  *
4  * This code is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License version 2 only, as
6  * published by the Free Software Foundation.
7  *
8  * This code is distributed in the hope that it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
11  * version 2 for more details (a copy is included in the LICENSE file that
12  * accompanied this code).
13  *
14  * You should have received a copy of the GNU General Public License version
15  * 2 along with this work; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17  *
18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19  * or visit www.oracle.com if you need additional information or have any
20  * questions.
21  */
22 
23 #include "precompiled.hpp"
24 #include "gc/z/zBarrier.inline.hpp"
25 #include "gc/z/zMark.inline.hpp"
26 #include "gc/z/zMarkCache.inline.hpp"
27 #include "gc/z/zMarkStack.inline.hpp"
28 #include "gc/z/zMarkTerminate.inline.hpp"
29 #include "gc/z/zOopClosures.inline.hpp"
30 #include "gc/z/zPage.hpp"
31 #include "gc/z/zPageTable.inline.hpp"
32 #include "gc/z/zRootsIterator.hpp"
33 #include "gc/z/zStat.hpp"
34 #include "gc/z/zTask.hpp"
35 #include "gc/z/zThread.hpp"
36 #include "gc/z/zUtils.inline.hpp"
37 #include "gc/z/zWorkers.inline.hpp"
38 #include "logging/log.hpp"
39 #include "memory/iterator.inline.hpp"
40 #include "oops/objArrayOop.inline.hpp"
41 #include "oops/oop.inline.hpp"
42 #include "runtime/atomic.hpp"
43 #include "runtime/handshake.hpp"
44 #include "runtime/orderAccess.hpp"
45 #include "runtime/prefetch.inline.hpp"
46 #include "runtime/thread.hpp"
47 #include "utilities/align.hpp"
48 #include "utilities/globalDefinitions.hpp"
49 #include "utilities/ticks.hpp"
50 
// Statistics sub-phases used to time the different parts of marking
static const ZStatSubPhase ZSubPhaseConcurrentMark("Concurrent Mark");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryFlush("Concurrent Mark Try Flush");
static const ZStatSubPhase ZSubPhaseConcurrentMarkIdle("Concurrent Mark Idle");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryTerminate("Concurrent Mark Try Terminate");
static const ZStatSubPhase ZSubPhaseMarkTryComplete("Pause Mark Try Complete");
56 
// Construct the marker. All counters start at zero. _work_terminateflush
// starts out true so the first termination attempt will try flushing
// thread-local stacks before actually terminating.
ZMark::ZMark(ZWorkers* workers, ZPageTable* pagetable) :
    _workers(workers),
    _pagetable(pagetable),
    _allocator(),
    _stripes(),
    _terminate(),
    _work_terminateflush(true),
    _work_nproactiveflush(0),
    _work_nterminateflush(0),
    _nproactiveflush(0),
    _nterminateflush(0),
    _ntrycomplete(0),
    _ncontinue(0),
    _nworkers(0) {}
71 
72 bool ZMark::is_initialized() const {
73   return _allocator.is_initialized();
74 }
75 
76 size_t ZMark::calculate_nstripes(uint nworkers) const {
77   // Calculate the number of stripes from the number of workers we use,
78   // where the number of stripes must be a power of two and we want to
79   // have at least one worker per stripe.
80   const size_t nstripes = ZUtils::round_down_power_of_2(nworkers);
81   return MIN2(nstripes, ZMarkStripesMax);
82 }
83 
// One-time setup at the start of a mark cycle: invalidate per-page
// marking information, reset the statistics counters, and size the
// stripe set for the number of concurrent workers this cycle will use.
void ZMark::prepare_mark() {
  // Increment global sequence number to invalidate
  // marking information for all pages.
  ZGlobalSeqNum++;

  // Reset flush/continue counters
  _nproactiveflush = 0;
  _nterminateflush = 0;
  _ntrycomplete = 0;
  _ncontinue = 0;

  // Set number of workers to use
  _nworkers = _workers->nconcurrent();

  // Set number of mark stripes to use, based on number
  // of workers we will use in the concurrent mark phase.
  const size_t nstripes = calculate_nstripes(_nworkers);
  _stripes.set_nstripes(nstripes);

  // Update statistics
  ZStatMark::set_at_mark_start(nstripes);

  // Print worker/stripe distribution
  LogTarget(Debug, gc, marking) log;
  if (log.is_enabled()) {
    log.print("Mark Worker/Stripe Distribution");
    for (uint worker_id = 0; worker_id < _nworkers; worker_id++) {
      const ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, worker_id);
      const size_t stripe_id = _stripes.stripe_id(stripe);
      log.print("  Worker %u(%u) -> Stripe " SIZE_FORMAT "(" SIZE_FORMAT ")",
                worker_id, _nworkers, stripe_id, nstripes);
    }
  }
}
118 
119 class ZMarkRootsTask : public ZTask {
120 private:
121   ZMark* const   _mark;
122   ZRootsIterator _roots;
123 
124 public:
125   ZMarkRootsTask(ZMark* mark) :
126       ZTask("ZMarkRootsTask"),
127       _mark(mark),
128       _roots() {}
129 
130   virtual void work() {
131     ZMarkRootOopClosure cl;
132     _roots.oops_do(&cl);
133 
134     // Flush and free worker stacks. Needed here since
135     // the set of workers executing during root scanning
136     // can be different from the set of workers executing
137     // during mark.
138     _mark->flush_and_free();
139   }
140 };
141 
// Start marking: optionally verify that all mark stacks are empty,
// prepare stripes/counters for the new cycle, and then mark the
// strong roots using a parallel worker task.
void ZMark::start() {
  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Prepare for concurrent mark
  prepare_mark();

  // Mark roots
  ZMarkRootsTask task(this);
  _workers->run_parallel(&task);
}
155 
156 void ZMark::prepare_work() {
157   assert(_nworkers == _workers->nconcurrent(), "Invalid number of workers");
158 
159   // Set number of active workers
160   _terminate.reset(_nworkers);
161 
162   // Reset flush counters
163   _work_nproactiveflush = _work_nterminateflush = 0;
164   _work_terminateflush = true;
165 }
166 
// Run after the mark workers finish; folds this invocation's flush
// counters into the cycle totals later reported to ZStatMark.
void ZMark::finish_work() {
  // Accumulate proactive/terminate flush counters
  _nproactiveflush += _work_nproactiveflush;
  _nterminateflush += _work_nterminateflush;
}
172 
173 bool ZMark::is_array(uintptr_t addr) const {
174   return ZOop::to_oop(addr)->is_objArray();
175 }
176 
// Push a partial-array work item covering [addr, addr + size) onto the
// stripe owning addr. The entry encodes the slice as an offset in
// ZMarkPartialArrayMinSize-units plus a length in oops; it is decoded
// again by follow_partial_array().
void ZMark::push_partial_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(is_aligned(addr, ZMarkPartialArrayMinSize), "Address misaligned");
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());
  ZMarkStripe* const stripe = _stripes.stripe_for_addr(addr);
  const uintptr_t offset = ZAddress::offset(addr) >> ZMarkPartialArrayMinSizeShift;
  const uintptr_t length = size / oopSize;
  const ZMarkStackEntry entry(offset, length, finalizable);

  log_develop_trace(gc, marking)("Array push partial: " PTR_FORMAT " (" SIZE_FORMAT "), stripe: " SIZE_FORMAT,
                                 addr, size, _stripes.stripe_id(stripe));

  stacks->push(&_allocator, &_stripes, stripe, entry, false /* publish */);
}
190 
// Apply the mark barrier to every element of the oop array slice
// starting at addr. Only used for slices small enough to be
// processed in one go (see follow_array).
void ZMark::follow_small_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= ZMarkPartialArrayMinSize, "Too large, should be split");
  const size_t length = size / oopSize;

  log_develop_trace(gc, marking)("Array follow small: " PTR_FORMAT " (" SIZE_FORMAT ")", addr, size);

  ZBarrier::mark_barrier_on_oop_array((oop*)addr, length, finalizable);
}
199 
// Split a large oop array slice into smaller work items: the unaligned
// trailing part and successive halves of the aligned middle part are
// pushed as partial-array entries, while the leading part (up to the
// first aligned boundary) is followed directly, so that this call
// always performs some marking work rather than only splitting.
void ZMark::follow_large_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= (size_t)arrayOopDesc::max_array_length(T_OBJECT) * oopSize, "Too large");
  assert(size > ZMarkPartialArrayMinSize, "Too small, should not be split");
  const uintptr_t start = addr;
  const uintptr_t end = start + size;

  // Calculate the aligned middle start/end/size, where the middle start
  // should always be greater than the start (hence the +1 below) to make
  // sure we always do some follow work, not just split the array into pieces.
  const uintptr_t middle_start = align_up(start + 1, ZMarkPartialArrayMinSize);
  const size_t    middle_size = align_down(end - middle_start, ZMarkPartialArrayMinSize);
  const uintptr_t middle_end = middle_start + middle_size;

  log_develop_trace(gc, marking)("Array follow large: " PTR_FORMAT "-" PTR_FORMAT" (" SIZE_FORMAT "), "
                                 "middle: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT ")",
                                 start, end, size, middle_start, middle_end, middle_size);

  // Push unaligned trailing part
  if (end > middle_end) {
    const uintptr_t trailing_addr = middle_end;
    const size_t trailing_size = end - middle_end;
    push_partial_array(trailing_addr, trailing_size, finalizable);
  }

  // Push aligned middle part(s), repeatedly halving the remaining
  // range so entries of decreasing size are produced back-to-front
  uintptr_t partial_addr = middle_end;
  while (partial_addr > middle_start) {
    const size_t parts = 2;
    const size_t partial_size = align_up((partial_addr - middle_start) / parts, ZMarkPartialArrayMinSize);
    partial_addr -= partial_size;
    push_partial_array(partial_addr, partial_size, finalizable);
  }

  // Follow leading part
  assert(start < middle_start, "Miscalculated middle start");
  const uintptr_t leading_addr = start;
  const size_t leading_size = middle_start - start;
  follow_small_array(leading_addr, leading_size, finalizable);
}
239 
240 void ZMark::follow_array(uintptr_t addr, size_t size, bool finalizable) {
241   if (size <= ZMarkPartialArrayMinSize) {
242     follow_small_array(addr, size, finalizable);
243   } else {
244     follow_large_array(addr, size, finalizable);
245   }
246 }
247 
// Decode a partial-array stack entry (pushed by push_partial_array)
// back into an address/size pair and follow that slice.
void ZMark::follow_partial_array(ZMarkStackEntry entry, bool finalizable) {
  const uintptr_t addr = ZAddress::good(entry.partial_array_offset() << ZMarkPartialArrayMinSizeShift);
  const size_t size = entry.partial_array_length() * oopSize;

  follow_array(addr, size, finalizable);
}
254 
// Follow all elements of an object array by following the address
// range covering its element part.
void ZMark::follow_array_object(objArrayOop obj, bool finalizable) {
  const uintptr_t addr = (uintptr_t)obj->base();
  const size_t size = (size_t)obj->length() * oopSize;

  follow_array(addr, size, finalizable);
}
261 
// Iterate over all oop fields of obj, applying the mark barrier to
// each. The finalizable flag is lifted into a template parameter of
// the closure, so the flag isn't tested per field.
void ZMark::follow_object(oop obj, bool finalizable) {
  if (finalizable) {
    ZMarkBarrierOopClosure<true /* finalizable */> cl;
    obj->oop_iterate(&cl);
  } else {
    ZMarkBarrierOopClosure<false /* finalizable */> cl;
    obj->oop_iterate(&cl);
  }
}
271 
// Try to mark the object at addr. Returns true if this thread marked
// the object, false if it was already marked or lives on a page that
// is still being allocated (such objects are implicitly marked). When
// a new object is marked live, its aligned size is added to the page's
// live statistics through the per-worker mark cache.
bool ZMark::try_mark_object(ZMarkCache* cache, uintptr_t addr, bool finalizable) {
  ZPage* const page = _pagetable->get(addr);
  if (page->is_allocating()) {
    // Newly allocated objects are implicitly marked
    return false;
  }

  // Try mark object
  bool inc_live = false;
  const bool success = page->mark_object(addr, finalizable, inc_live);
  if (inc_live) {
    // Update live objects/bytes for page. We use the aligned object
    // size since that is the actual number of bytes used on the page
    // and alignment paddings can never be reclaimed.
    const size_t size = ZUtils::object_size(addr);
    const size_t aligned_size = align_up(size, page->object_alignment());
    cache->inc_live(page, aligned_size);
  }

  return success;
}
293 
// Process a single mark stack entry: either a partial-array item, or
// an object address that is marked and, if newly marked, followed
// (its fields or array elements are visited).
void ZMark::mark_and_follow(ZMarkCache* cache, ZMarkStackEntry entry) {
  // Decode flags
  const bool finalizable = entry.finalizable();
  const bool partial_array = entry.partial_array();

  if (partial_array) {
    follow_partial_array(entry, finalizable);
    return;
  }

  // Decode object address
  const uintptr_t addr = entry.object_address();

  if (!try_mark_object(cache, addr, finalizable)) {
    // Already marked
    return;
  }

  if (is_array(addr)) {
    follow_array_object(objArrayOop(ZOop::to_oop(addr)), finalizable);
  } else {
    follow_object(ZOop::to_oop(addr), finalizable);
  }
}
318 
// Drain all entries reachable through the thread-local stacks of the
// given stripe, following each one. T is a timeout policy providing
// has_expired() (ZMarkNoTimeout or ZMarkTimeout); the timeout is
// checked once per processed entry. Returns false if the timeout
// expired before the stripe was fully drained, true otherwise.
template <typename T>
bool ZMark::drain(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
  ZMarkStackEntry entry;

  // Drain stripe stacks
  while (stacks->pop(&_allocator, &_stripes, stripe, entry)) {
    mark_and_follow(cache, entry);

    // Check timeout
    if (timeout->has_expired()) {
      // Timeout
      return false;
    }
  }

  // Success
  return true;
}
337 
338 template <typename T>
339 bool ZMark::drain_and_flush(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
340   const bool success = drain(stripe, stacks, cache, timeout);
341 
342   // Flush and publish worker stacks
343   stacks->flush(&_allocator, &_stripes);
344 
345   return success;
346 }
347 
// Try to steal a whole mark stack from another stripe and install it
// on our own stripe via the thread-local stacks. Victim stripes are
// visited in ring order, starting with the stripe after our own.
// Returns true if a stack was stolen.
bool ZMark::try_steal(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  // Try to steal a stack from another stripe
  for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
       victim_stripe != stripe;
       victim_stripe = _stripes.stripe_next(victim_stripe)) {
    ZMarkStack* const stack = victim_stripe->steal_stack();
    if (stack != NULL) {
      // Success, install the stolen stack
      stacks->install(&_stripes, stripe, stack);
      return true;
    }
  }

  // Nothing to steal
  return false;
}
364 
// Sleep briefly (1 ms) to give other workers a chance to make
// progress, accounting the time under the "Concurrent Mark Idle"
// statistics sub-phase.
void ZMark::idle() const {
  ZStatTimer timer(ZSubPhaseConcurrentMarkIdle);
  os::naked_short_sleep(1);
}
369 
// Thread closure that flushes and frees each visited thread's local
// mark stacks, remembering whether any thread actually had work.
class ZMarkFlushAndFreeStacksClosure : public ThreadClosure {
private:
  ZMark* const _mark;
  bool         _flushed; // True if any visited thread flushed out work

public:
  ZMarkFlushAndFreeStacksClosure(ZMark* mark) :
      _mark(mark),
      _flushed(false) {}

  void do_thread(Thread* thread) {
    if (_mark->flush_and_free(thread)) {
      _flushed = true;
    }
  }

  // Returns true if at least one thread had stacks to flush
  bool flushed() const {
    return _flushed;
  }
};
390 
// Flush the thread-local mark stacks of all threads, either directly
// (when called inside a safepoint) or via a handshake operation (when
// called concurrently). Returns true if more mark work is available
// afterwards, i.e. if anything was flushed or the stripes are non-empty.
bool ZMark::flush(bool at_safepoint) {
  ZMarkFlushAndFreeStacksClosure cl(this);
  if (at_safepoint) {
    Threads::threads_do(&cl);
  } else {
    Handshake::execute(&cl);
  }

  // Returns true if more work is available
  return cl.flushed() || !_stripes.is_empty();
}
402 
// Attempt a concurrent flush of all threads' mark stacks, counting the
// attempt in *nflush. Requires thread-local handshakes to be enabled;
// returns false without flushing otherwise. Returns true if the flush
// exposed more work.
bool ZMark::try_flush(volatile size_t* nflush) {
  // Only flush if handshakes are enabled
  if (!ThreadLocalHandshakes) {
    return false;
  }

  Atomic::inc(nflush);

  ZStatTimer timer(ZSubPhaseConcurrentMarkTryFlush);
  return flush(false /* at_safepoint */);
}
414 
// Proactively flush thread-local mark stacks to expose work early.
// Only worker 0 does this, and only while the proactive flush limit
// hasn't been reached and no termination flush has started. Returns
// true if a flush was performed and found more work.
bool ZMark::try_proactive_flush() {
  // Only do proactive flushes from worker 0
  if (ZThread::worker_id() != 0) {
    return false;
  }

  if (Atomic::load(&_work_nproactiveflush) == ZMarkProactiveFlushMax ||
      Atomic::load(&_work_nterminateflush) != 0) {
    // Limit reached or we're trying to terminate
    return false;
  }

  return try_flush(&_work_nproactiveflush);
}
429 
// Two-stage termination protocol. When the last active worker enters
// stage 0, it first tries flushing thread-local stacks (bounded by
// ZMarkTerminateFlushMax attempts) to expose any remaining work before
// letting workers proceed to stage 1, where actual termination
// happens. Returns true when this worker should terminate, false when
// it should go back and attempt more marking.
bool ZMark::try_terminate() {
  ZStatTimer timer(ZSubPhaseConcurrentMarkTryTerminate);

  if (_terminate.enter_stage0()) {
    // Last thread entered stage 0, flush
    if (Atomic::load(&_work_terminateflush) &&
        Atomic::load(&_work_nterminateflush) != ZMarkTerminateFlushMax) {
      // Exit stage 0 to allow other threads to continue marking
      _terminate.exit_stage0();

      // Flush before termination
      if (!try_flush(&_work_nterminateflush)) {
        // No more work available, skip further flush attempts
        Atomic::store(false, &_work_terminateflush);
      }

      // Don't terminate, regardless of whether we successfully
      // flushed out more work or not. We've already exited
      // termination stage 0, to allow other threads to continue
      // marking, so this thread has to return false and also
      // make another round of attempted marking.
      return false;
    }
  }

  for (;;) {
    if (_terminate.enter_stage1()) {
      // Last thread entered stage 1, terminate
      return true;
    }

    // Idle to give the other threads
    // a chance to enter termination.
    idle();

    if (!_terminate.try_exit_stage1()) {
      // All workers in stage 1, terminate
      return true;
    }

    if (_terminate.try_exit_stage0()) {
      // More work available, don't terminate
      return false;
    }
  }
}
476 
// Timeout policy that never expires, used by work_without_timeout()
// so that drain() always runs to completion.
class ZMarkNoTimeout : public StackObj {
public:
  bool has_expired() {
    return false;
  }
};
483 
// Mark until global termination: repeatedly drain this worker's
// stripe, then try to steal work from other stripes, then try a
// proactive flush, and finally run the termination protocol. The
// loop only exits when try_terminate() succeeds.
void ZMark::work_without_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  ZStatTimer timer(ZSubPhaseConcurrentMark);
  ZMarkNoTimeout no_timeout;

  for (;;) {
    drain_and_flush(stripe, stacks, cache, &no_timeout);

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    if (try_proactive_flush()) {
      // Work available
      continue;
    }

    if (try_terminate()) {
      // Terminate
      break;
    }
  }
}
507 
// Timeout policy used by work_with_timeout(). To keep overhead low,
// the clock is only sampled once every _check_interval calls to
// has_expired(); once the deadline passes, the expired state is latched.
class ZMarkTimeout : public StackObj {
private:
  const Ticks    _start;          // Time when marking with timeout started
  const uint64_t _timeout;        // Deadline, in time-counter units
  const uint64_t _check_interval; // Calls between clock samples
  uint64_t       _check_at;       // Call count at which to sample the clock next
  uint64_t       _check_count;    // Total number of has_expired() calls
  bool           _expired;        // Latched once the deadline has passed

public:
  ZMarkTimeout(uint64_t timeout_in_millis) :
      _start(Ticks::now()),
      _timeout(_start.value() + TimeHelper::millis_to_counter(timeout_in_millis)),
      _check_interval(200),
      _check_at(_check_interval),
      _check_count(0),
      _expired(false) {}

  ~ZMarkTimeout() {
    // Log whether the timed mark completed or expired. _check_count
    // approximates the number of oops processed, since has_expired()
    // is called once per drained entry (see ZMark::drain).
    const Tickspan duration = Ticks::now() - _start;
    log_debug(gc, marking)("Mark With Timeout (%s): %s, " UINT64_FORMAT " oops, %.3fms",
                           ZThread::name(), _expired ? "Expired" : "Completed",
                           _check_count, TimeHelper::counter_to_millis(duration.value()));
  }

  // Returns true once the deadline has passed. The clock is sampled
  // only every _check_interval calls, so expiry can be detected with
  // a delay of up to one interval.
  bool has_expired() {
    if (++_check_count == _check_at) {
      _check_at += _check_interval;
      if ((uint64_t)Ticks::now().value() >= _timeout) {
        // Timeout
        _expired = true;
      }
    }

    return _expired;
  }
};
545 
// Mark with a time budget: drain this worker's stripe and steal work
// until either the timeout expires or there is nothing left to steal.
// Used during the mark-end pause to try completing marking within a
// bounded amount of time.
void ZMark::work_with_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, uint64_t timeout_in_millis) {
  ZStatTimer timer(ZSubPhaseMarkTryComplete);
  ZMarkTimeout timeout(timeout_in_millis);

  for (;;) {
    if (!drain_and_flush(stripe, stacks, cache, &timeout)) {
      // Timed out
      break;
    }

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    // Terminate
    break;
  }
}
565 
// Per-worker marking entry point. A timeout of zero means mark until
// global termination; otherwise mark for at most timeout_in_millis.
// On return, this worker's thread-local stacks have been flushed to
// the stripes and freed back to the allocator.
void ZMark::work(uint64_t timeout_in_millis) {
  ZMarkCache cache(_stripes.nstripes());
  ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, ZThread::worker_id());
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());

  if (timeout_in_millis == 0) {
    work_without_timeout(&cache, stripe, stacks);
  } else {
    work_with_timeout(&cache, stripe, stacks, timeout_in_millis);
  }

  // Make sure stacks have been flushed
  assert(stacks->is_empty(&_stripes), "Should be empty");

  // Free remaining stacks
  stacks->free(&_allocator);
}
583 
584 class ZMarkConcurrentRootsTask : public ZTask {
585 private:
586   ZMark* const             _mark;
587   ZConcurrentRootsIterator _roots;
588 
589 public:
590   ZMarkConcurrentRootsTask(ZMark* mark) :
591       ZTask("ZMarkConcurrentRootsTask"),
592       _mark(mark),
593       _roots() {}
594 
595   virtual void work() {
596     ZMarkBarrierOopClosure<false /* finalizable */> cl;
597     _roots.oops_do(&cl);
598   }
599 };
600 
// Task running the actual marking work in the workers. prepare_work()
// and finish_work() are invoked from the constructor/destructor, i.e.
// in the coordinating thread before and after the workers run.
class ZMarkTask : public ZTask {
private:
  ZMark* const   _mark;
  const uint64_t _timeout_in_millis; // 0 means no timeout

public:
  ZMarkTask(ZMark* mark, uint64_t timeout_in_millis = 0) :
      ZTask("ZMarkTask"),
      _mark(mark),
      _timeout_in_millis(timeout_in_millis) {
    _mark->prepare_work();
  }

  ~ZMarkTask() {
    _mark->finish_work();
  }

  virtual void work() {
    _mark->work(_timeout_in_millis);
  }
};
622 
// Run concurrent marking. On the initial invocation of a cycle, the
// concurrent roots are scanned first, then the marking work itself is
// run on the concurrent workers.
void ZMark::mark(bool initial) {
  if (initial) {
    ZMarkConcurrentRootsTask task(this);
    _workers->run_concurrent(&task);
  }

  ZMarkTask task(this);
  _workers->run_concurrent(&task);
}
632 
// Try to finish marking inside the mark-end pause by doing a bounded
// (ZMarkCompleteTimeout) amount of mark work. Returns true if all
// stripes are empty afterwards, i.e. marking completed.
bool ZMark::try_complete() {
  _ntrycomplete++;

  // Use nconcurrent number of worker threads to maintain the
  // worker/stripe distribution used during concurrent mark.
  ZMarkTask task(this, ZMarkCompleteTimeout);
  _workers->run_concurrent(&task);

  // Successful if all stripes are empty
  return _stripes.is_empty();
}
644 
645 bool ZMark::try_end() {
646   // Flush all mark stacks
647   if (!flush(true /* at_safepoint */)) {
648     // Mark completed
649     return true;
650   }
651 
652   // Try complete marking by doing a limited
653   // amount of mark work in this phase.
654   return try_complete();
655 }
656 
// End marking. Returns false (and counts a continuation) if marking
// could not be completed and must resume concurrently; otherwise
// optionally verifies all stacks are empty, reports statistics, and
// returns true.
bool ZMark::end() {
  // Try end marking
  if (!try_end()) {
    // Mark not completed
    _ncontinue++;
    return false;
  }

  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Update statistics
  ZStatMark::set_at_mark_end(_nproactiveflush, _nterminateflush, _ntrycomplete, _ncontinue);

  // Mark completed
  return true;
}
676 
677 void ZMark::flush_and_free() {
678   Thread* const thread = Thread::current();
679   flush_and_free(thread);
680 }
681 
682 bool ZMark::flush_and_free(Thread* thread) {
683   ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
684   const bool flushed = stacks->flush(&_allocator, &_stripes);
685   stacks->free(&_allocator);
686   return flushed;
687 }
688 
689 class ZVerifyMarkStacksEmptyClosure : public ThreadClosure {
690 private:
691   const ZMarkStripeSet* const _stripes;
692 
693 public:
694   ZVerifyMarkStacksEmptyClosure(const ZMarkStripeSet* stripes) :
695       _stripes(stripes) {}
696 
697   void do_thread(Thread* thread) {
698     ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
699     guarantee(stacks->is_empty(_stripes), "Should be empty");
700   }
701 };
702 
// Verify that no mark work remains: neither in any thread's local
// stacks nor in any of the stripes.
void ZMark::verify_all_stacks_empty() const {
  // Verify thread stacks
  ZVerifyMarkStacksEmptyClosure cl(&_stripes);
  Threads::threads_do(&cl);

  // Verify stripe stacks
  guarantee(_stripes.is_empty(), "Should be empty");
}
--- EOF ---