/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zMarkCache.inline.hpp"
#include "gc/z/zMarkStack.inline.hpp"
#include "gc/z/zMarkTerminate.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.inline.hpp"
#include "gc/z/zThreadLocalAllocBuffer.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handshake.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ticks.hpp"

static const ZStatSubPhase ZSubPhaseConcurrentMark("Concurrent Mark");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryFlush("Concurrent Mark Try Flush");
static const ZStatSubPhase ZSubPhaseConcurrentMarkIdle("Concurrent Mark Idle");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryTerminate("Concurrent Mark Try Terminate");
static const ZStatSubPhase ZSubPhaseMarkTryComplete("Pause Mark Try Complete");

ZMark::ZMark(ZWorkers* workers, ZPageTable* page_table) :
    _workers(workers),
    _page_table(page_table),
    _allocator(),
    _stripes(),
    _terminate(),
    _work_terminateflush(true),
    _work_nproactiveflush(0),
    _work_nterminateflush(0),
    _nproactiveflush(0),
    _nterminateflush(0),
    _ntrycomplete(0),
    _ncontinue(0),
    _nworkers(0) {}

bool ZMark::is_initialized() const {
  return _allocator.is_initialized();
}

size_t ZMark::calculate_nstripes(uint nworkers) const {
  // Calculate the number of stripes from the number of workers we use,
  // where the number of stripes must be a power of two and we want to
  // have at least one worker per stripe.
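  // For example, 6 workers round down to 4 stripes, with the result
  // capped at ZMarkStripesMax.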
  const size_t nstripes = ZUtils::round_down_power_of_2(nworkers);
  return MIN2(nstripes, ZMarkStripesMax);
}

void ZMark::prepare_mark() {
  // Increment global sequence number to invalidate
  // marking information for all pages.
  ZGlobalSeqNum++;

  // Reset flush/continue counters
  _nproactiveflush = 0;
  _nterminateflush = 0;
  _ntrycomplete = 0;
  _ncontinue = 0;

  // Set number of workers to use
  _nworkers = _workers->nconcurrent();

  // Set number of mark stripes to use, based on number
  // of workers we will use in the concurrent mark phase.
  const size_t nstripes = calculate_nstripes(_nworkers);
  _stripes.set_nstripes(nstripes);

  // Update statistics
  ZStatMark::set_at_mark_start(nstripes);

  // Print worker/stripe distribution
  LogTarget(Debug, gc, marking) log;
  if (log.is_enabled()) {
    log.print("Mark Worker/Stripe Distribution");
    for (uint worker_id = 0; worker_id < _nworkers; worker_id++) {
      const ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, worker_id);
      const size_t stripe_id = _stripes.stripe_id(stripe);
      log.print(" Worker %u(%u) -> Stripe " SIZE_FORMAT "(" SIZE_FORMAT ")",
                worker_id, _nworkers, stripe_id, nstripes);
    }
  }
}

class ZMarkRootsIteratorClosure : public ZRootsIteratorClosure {
public:
  ZMarkRootsIteratorClosure() {
    ZThreadLocalAllocBuffer::reset_statistics();
  }

  ~ZMarkRootsIteratorClosure() {
    ZThreadLocalAllocBuffer::publish_statistics();
  }

  virtual void do_thread(Thread* thread) {
    // Update thread local address bad mask
    ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask);

    // Mark invisible root
    ZThreadLocalData::do_invisible_root(thread, ZBarrier::mark_barrier_on_invisible_root_oop_field);

    // Retire TLAB
    ZThreadLocalAllocBuffer::retire(thread);
  }

  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_root_oop_field(p);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZMarkRootsTask : public ZTask {
private:
  ZMark* const              _mark;
  ZRootsIterator            _roots;
  ZMarkRootsIteratorClosure _cl;

public:
  ZMarkRootsTask(ZMark* mark) :
      ZTask("ZMarkRootsTask"),
      _mark(mark),
      _roots(false /* visit_jvmti_weak_export */) {}

  virtual void work() {
    _roots.oops_do(&_cl);

    // Flush and free worker stacks. Needed here since
    // the set of workers executing during root scanning
    // can be different from the set of workers executing
    // during mark.
    _mark->flush_and_free();
  }
};

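// Starts marking: prepares marking state and scans the thread roots with
// a parallel worker task. The bulk of marking is then done concurrently
// by ZMark::mark().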
void ZMark::start() {
  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Prepare for concurrent mark
  prepare_mark();

  // Mark roots
  ZMarkRootsTask task(this);
  _workers->run_parallel(&task);
}

void ZMark::prepare_work() {
  assert(_nworkers == _workers->nconcurrent(), "Invalid number of workers");

  // Set number of active workers
  _terminate.reset(_nworkers);

  // Reset flush counters
  _work_nproactiveflush = _work_nterminateflush = 0;
  _work_terminateflush = true;
}

void ZMark::finish_work() {
  // Accumulate proactive/terminate flush counters
  _nproactiveflush += _work_nproactiveflush;
  _nterminateflush += _work_nterminateflush;
}

bool ZMark::is_array(uintptr_t addr) const {
  return ZOop::from_address(addr)->is_objArray();
}

void ZMark::push_partial_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(is_aligned(addr, ZMarkPartialArrayMinSize), "Address misaligned");
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());
  ZMarkStripe* const stripe = _stripes.stripe_for_addr(addr);
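  // Encode the address as an offset in units of ZMarkPartialArrayMinSize.
  // The alignment asserted above makes the low bits redundant, which
  // leaves room in the stack entry for the array length.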
  const uintptr_t offset = ZAddress::offset(addr) >> ZMarkPartialArrayMinSizeShift;
  const uintptr_t length = size / oopSize;
  const ZMarkStackEntry entry(offset, length, finalizable);

  log_develop_trace(gc, marking)("Array push partial: " PTR_FORMAT " (" SIZE_FORMAT "), stripe: " SIZE_FORMAT,
                                 addr, size, _stripes.stripe_id(stripe));

  stacks->push(&_allocator, &_stripes, stripe, entry, false /* publish */);
}

void ZMark::follow_small_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= ZMarkPartialArrayMinSize, "Too large, should be split");
  const size_t length = size / oopSize;

  log_develop_trace(gc, marking)("Array follow small: " PTR_FORMAT " (" SIZE_FORMAT ")", addr, size);

  ZBarrier::mark_barrier_on_oop_array((oop*)addr, length, finalizable);
}

void ZMark::follow_large_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= (size_t)arrayOopDesc::max_array_length(T_OBJECT) * oopSize, "Too large");
  assert(size > ZMarkPartialArrayMinSize, "Too small, should not be split");
  const uintptr_t start = addr;
  const uintptr_t end = start + size;

  // Calculate the aligned middle start/end/size, where the middle start
  // should always be greater than the start (hence the +1 below) to make
  // sure we always do some follow work, not just split the array into pieces.
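  // As a worked example, assuming a 4K ZMarkPartialArrayMinSize: an array
  // spanning [0x1000, 0x3800) gets the middle [0x2000, 0x3000), a trailing
  // part [0x3000, 0x3800), and a leading part [0x1000, 0x2000) that is
  // followed immediately below.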
  const uintptr_t middle_start = align_up(start + 1, ZMarkPartialArrayMinSize);
  const size_t middle_size = align_down(end - middle_start, ZMarkPartialArrayMinSize);
  const uintptr_t middle_end = middle_start + middle_size;

  log_develop_trace(gc, marking)("Array follow large: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT "), "
                                 "middle: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT ")",
                                 start, end, size, middle_start, middle_end, middle_size);

  // Push unaligned trailing part
  if (end > middle_end) {
    const uintptr_t trailing_addr = middle_end;
    const size_t trailing_size = end - middle_end;
    push_partial_array(trailing_addr, trailing_size, finalizable);
  }

  // Push aligned middle part(s)
  uintptr_t partial_addr = middle_end;
  while (partial_addr > middle_start) {
    const size_t parts = 2;
    const size_t partial_size = align_up((partial_addr - middle_start) / parts, ZMarkPartialArrayMinSize);
    partial_addr -= partial_size;
    push_partial_array(partial_addr, partial_size, finalizable);
  }

  // Follow leading part
  assert(start < middle_start, "Miscalculated middle start");
  const uintptr_t leading_addr = start;
  const size_t leading_size = middle_start - start;
  follow_small_array(leading_addr, leading_size, finalizable);
}

void ZMark::follow_array(uintptr_t addr, size_t size, bool finalizable) {
  if (size <= ZMarkPartialArrayMinSize) {
    follow_small_array(addr, size, finalizable);
  } else {
    follow_large_array(addr, size, finalizable);
  }
}

void ZMark::follow_partial_array(ZMarkStackEntry entry, bool finalizable) {
  const uintptr_t addr = ZAddress::good(entry.partial_array_offset() << ZMarkPartialArrayMinSizeShift);
  const size_t size = entry.partial_array_length() * oopSize;

  follow_array(addr, size, finalizable);
}

void ZMark::follow_array_object(objArrayOop obj, bool finalizable) {
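  // Follow the klass field explicitly, since the element ranges pushed
  // below carry no klass information.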
  if (finalizable) {
    ZMarkBarrierOopClosure<true /* finalizable */> cl;
    cl.do_klass(obj->klass());
  } else {
    ZMarkBarrierOopClosure<false /* finalizable */> cl;
    cl.do_klass(obj->klass());
  }

  const uintptr_t addr = (uintptr_t)obj->base();
  const size_t size = (size_t)obj->length() * oopSize;

  follow_array(addr, size, finalizable);
}

void ZMark::follow_object(oop obj, bool finalizable) {
  if (finalizable) {
    ZMarkBarrierOopClosure<true /* finalizable */> cl;
    obj->oop_iterate(&cl);
  } else {
    ZMarkBarrierOopClosure<false /* finalizable */> cl;
    obj->oop_iterate(&cl);
  }
}

bool ZMark::try_mark_object(ZMarkCache* cache, uintptr_t addr, bool finalizable) {
  ZPage* const page = _page_table->get(addr);
  if (page->is_allocating()) {
    // Newly allocated objects are implicitly marked
    return false;
  }

  // Try mark object
  bool inc_live = false;
  const bool success = page->mark_object(addr, finalizable, inc_live);
  if (inc_live) {
    // Update live objects/bytes for page. We use the aligned object
    // size since that is the actual number of bytes used on the page
    // and alignment paddings can never be reclaimed.
    const size_t size = ZUtils::object_size(addr);
    const size_t aligned_size = align_up(size, page->object_alignment());
    cache->inc_live(page, aligned_size);
  }

  return success;
}

void ZMark::mark_and_follow(ZMarkCache* cache, ZMarkStackEntry entry) {
  // Decode flags
  const bool finalizable = entry.finalizable();
  const bool partial_array = entry.partial_array();

  if (partial_array) {
    follow_partial_array(entry, finalizable);
    return;
  }

  // Decode object address and follow flag
  const uintptr_t addr = entry.object_address();

  if (!try_mark_object(cache, addr, finalizable)) {
    // Already marked
    return;
  }

  if (is_array(addr)) {
    // Decode follow flag
    const bool follow = entry.follow();

    // The follow flag is currently only relevant for object arrays
    if (follow) {
      follow_array_object(objArrayOop(ZOop::from_address(addr)), finalizable);
    }
  } else {
    follow_object(ZOop::from_address(addr), finalizable);
  }
}

template <typename T>
bool ZMark::drain(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
  ZMarkStackEntry entry;

  // Drain stripe stacks
  while (stacks->pop(&_allocator, &_stripes, stripe, entry)) {
    mark_and_follow(cache, entry);

    // Check timeout
    if (timeout->has_expired()) {
      // Timeout
      return false;
    }
  }

  // Success
  return true;
}

template <typename T>
bool ZMark::drain_and_flush(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
  const bool success = drain(stripe, stacks, cache, timeout);

  // Flush and publish worker stacks
  stacks->flush(&_allocator, &_stripes);

  return success;
}

bool ZMark::try_steal(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  // Try to steal a stack from another stripe
  for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
       victim_stripe != stripe;
       victim_stripe = _stripes.stripe_next(victim_stripe)) {
    ZMarkStack* const stack = victim_stripe->steal_stack();
    if (stack != NULL) {
      // Success, install the stolen stack
      stacks->install(&_stripes, stripe, stack);
      return true;
    }
  }

  // Nothing to steal
  return false;
}

void ZMark::idle() const {
  ZStatTimer timer(ZSubPhaseConcurrentMarkIdle);
  os::naked_short_sleep(1);
}

class ZMarkFlushAndFreeStacksClosure : public HandshakeClosure {
private:
  ZMark* const _mark;
  bool         _flushed;

public:
  ZMarkFlushAndFreeStacksClosure(ZMark* mark) :
      HandshakeClosure("ZMarkFlushAndFreeStacks"),
      _mark(mark),
      _flushed(false) {}

  void do_thread(Thread* thread) {
    if (_mark->flush_and_free(thread)) {
      _flushed = true;
    }
  }

  bool flushed() const {
    return _flushed;
  }
};

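// Flushes the thread-local mark stacks of all Java threads, either directly
// when called inside a safepoint, or via a handshake when running
// concurrently.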
bool ZMark::flush(bool at_safepoint) {
  ZMarkFlushAndFreeStacksClosure cl(this);
  if (at_safepoint) {
    Threads::threads_do(&cl);
  } else {
    Handshake::execute(&cl);
  }

  // Returns true if more work is available
  return cl.flushed() || !_stripes.is_empty();
}

bool ZMark::try_flush(volatile size_t* nflush) {
  // Only flush if handshakes are enabled
  if (!ThreadLocalHandshakes) {
    return false;
  }

  Atomic::inc(nflush);

  ZStatTimer timer(ZSubPhaseConcurrentMarkTryFlush);
  return flush(false /* at_safepoint */);
}

bool ZMark::try_proactive_flush() {
  // Only do proactive flushes from worker 0
  if (ZThread::worker_id() != 0) {
    return false;
  }

  if (Atomic::load(&_work_nproactiveflush) == ZMarkProactiveFlushMax ||
      Atomic::load(&_work_nterminateflush) != 0) {
    // Limit reached or we're trying to terminate
    return false;
  }

  return try_flush(&_work_nproactiveflush);
}

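// Termination happens in two stages. A worker first enters stage 0; the
// last worker to do so attempts to flush out more work from the thread-local
// stacks before anyone is allowed to terminate. Marking only terminates once
// all workers have entered stage 1 without new work appearing.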
bool ZMark::try_terminate() {
  ZStatTimer timer(ZSubPhaseConcurrentMarkTryTerminate);

  if (_terminate.enter_stage0()) {
    // Last thread entered stage 0, flush
    if (Atomic::load(&_work_terminateflush) &&
        Atomic::load(&_work_nterminateflush) != ZMarkTerminateFlushMax) {
      // Exit stage 0 to allow other threads to continue marking
      _terminate.exit_stage0();

      // Flush before termination
      if (!try_flush(&_work_nterminateflush)) {
        // No more work available, skip further flush attempts
        Atomic::store(&_work_terminateflush, false);
      }

      // Don't terminate, regardless of whether we successfully
      // flushed out more work or not. We've already exited
      // termination stage 0, to allow other threads to continue
      // marking, so this thread has to return false and also
      // make another round of attempted marking.
      return false;
    }
  }

  for (;;) {
    if (_terminate.enter_stage1()) {
      // Last thread entered stage 1, terminate
      return true;
    }

    // Idle to give the other threads
    // a chance to enter termination.
    idle();

    if (!_terminate.try_exit_stage1()) {
      // All workers in stage 1, terminate
      return true;
    }

    if (_terminate.try_exit_stage0()) {
      // More work available, don't terminate
      return false;
    }
  }
}

class ZMarkNoTimeout : public StackObj {
public:
  bool has_expired() {
    return false;
  }
};

void ZMark::work_without_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  ZStatTimer timer(ZSubPhaseConcurrentMark);
  ZMarkNoTimeout no_timeout;

  for (;;) {
    drain_and_flush(stripe, stacks, cache, &no_timeout);

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    if (try_proactive_flush()) {
      // Work available
      continue;
    }

    if (try_terminate()) {
      // Terminate
      break;
    }
  }
}

class ZMarkTimeout : public StackObj {
private:
  const Ticks    _start;
  const uint64_t _timeout;
  const uint64_t _check_interval;
  uint64_t       _check_at;
  uint64_t       _check_count;
  bool           _expired;

public:
  ZMarkTimeout(uint64_t timeout_in_millis) :
      _start(Ticks::now()),
      _timeout(_start.value() + TimeHelper::millis_to_counter(timeout_in_millis)),
      _check_interval(200),
      _check_at(_check_interval),
      _check_count(0),
      _expired(false) {}

  ~ZMarkTimeout() {
    const Tickspan duration = Ticks::now() - _start;
    log_debug(gc, marking)("Mark With Timeout (%s): %s, " UINT64_FORMAT " oops, %.3fms",
                           ZThread::name(), _expired ? "Expired" : "Completed",
                           _check_count, TimeHelper::counter_to_millis(duration.value()));
  }

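  // Sampling the clock on every drained entry would be too expensive,
  // so the time is only checked once every _check_interval calls.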
  bool has_expired() {
    if (++_check_count == _check_at) {
      _check_at += _check_interval;
      if ((uint64_t)Ticks::now().value() >= _timeout) {
        // Timeout
        _expired = true;
      }
    }

    return _expired;
  }
};

void ZMark::work_with_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, uint64_t timeout_in_millis) {
  ZStatTimer timer(ZSubPhaseMarkTryComplete);
  ZMarkTimeout timeout(timeout_in_millis);

  for (;;) {
    if (!drain_and_flush(stripe, stacks, cache, &timeout)) {
      // Timed out
      break;
    }

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    // Terminate
    break;
  }
}

void ZMark::work(uint64_t timeout_in_millis) {
  ZMarkCache cache(_stripes.nstripes());
  ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, ZThread::worker_id());
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());

  if (timeout_in_millis == 0) {
    work_without_timeout(&cache, stripe, stacks);
  } else {
    work_with_timeout(&cache, stripe, stacks, timeout_in_millis);
  }

  // Make sure stacks have been flushed
  assert(stacks->is_empty(&_stripes), "Should be empty");

  // Free remaining stacks
  stacks->free(&_allocator);
}

class ZMarkConcurrentRootsIteratorClosure : public ZRootsIteratorClosure {
public:
  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_oop_field(p, false /* finalizable */);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZMarkConcurrentRootsTask : public ZTask {
private:
  SuspendibleThreadSetJoiner          _sts_joiner;
  ZConcurrentRootsIteratorClaimStrong _roots;
  ZMarkConcurrentRootsIteratorClosure _cl;

public:
  ZMarkConcurrentRootsTask(ZMark* mark) :
      ZTask("ZMarkConcurrentRootsTask"),
      _sts_joiner(),
      _roots(),
      _cl() {
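    // Hold the ClassLoaderDataGraph lock for the duration of the root
    // iteration to block concurrent modification of the CLD graph.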
    ClassLoaderDataGraph_lock->lock();
  }

  ~ZMarkConcurrentRootsTask() {
    ClassLoaderDataGraph_lock->unlock();
  }

  virtual void work() {
    _roots.oops_do(&_cl);
  }
};

class ZMarkTask : public ZTask {
private:
  ZMark* const   _mark;
  const uint64_t _timeout_in_millis;

public:
  ZMarkTask(ZMark* mark, uint64_t timeout_in_millis = 0) :
      ZTask("ZMarkTask"),
      _mark(mark),
      _timeout_in_millis(timeout_in_millis) {
    _mark->prepare_work();
  }

  ~ZMarkTask() {
    _mark->finish_work();
  }

  virtual void work() {
    _mark->work(_timeout_in_millis);
  }
};

void ZMark::mark(bool initial) {
  if (initial) {
    ZMarkConcurrentRootsTask task(this);
    _workers->run_concurrent(&task);
  }

  ZMarkTask task(this);
  _workers->run_concurrent(&task);
}

bool ZMark::try_complete() {
  _ntrycomplete++;

  // Use nconcurrent number of worker threads to maintain the
  // worker/stripe distribution used during concurrent mark.
  ZMarkTask task(this, ZMarkCompleteTimeout);
  _workers->run_concurrent(&task);

  // Successful if all stripes are empty
  return _stripes.is_empty();
}

bool ZMark::try_end() {
  // Flush all mark stacks
  if (!flush(true /* at_safepoint */)) {
    // Mark completed
    return true;
  }

  // Try complete marking by doing a limited
  // amount of mark work in this phase.
  return try_complete();
}

bool ZMark::end() {
  // Try end marking
  if (!try_end()) {
    // Mark not completed
    _ncontinue++;
    return false;
  }

  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Update statistics
  ZStatMark::set_at_mark_end(_nproactiveflush, _nterminateflush, _ntrycomplete, _ncontinue);

  // Mark completed
  return true;
}

void ZMark::flush_and_free() {
  Thread* const thread = Thread::current();
  flush_and_free(thread);
}

bool ZMark::flush_and_free(Thread* thread) {
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
  const bool flushed = stacks->flush(&_allocator, &_stripes);
  stacks->free(&_allocator);
  return flushed;
}

class ZVerifyMarkStacksEmptyClosure : public ThreadClosure {
private:
  const ZMarkStripeSet* const _stripes;

public:
  ZVerifyMarkStacksEmptyClosure(const ZMarkStripeSet* stripes) :
      _stripes(stripes) {}

  void do_thread(Thread* thread) {
    ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
    guarantee(stacks->is_empty(_stripes), "Should be empty");
  }
};

void ZMark::verify_all_stacks_empty() const {
  // Verify thread stacks
  ZVerifyMarkStacksEmptyClosure cl(&_stripes);
  Threads::threads_do(&cl);

  // Verify stripe stacks
  guarantee(_stripes.is_empty(), "Should be empty");
}