/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zMarkCache.inline.hpp"
#include "gc/z/zMarkStack.inline.hpp"
#include "gc/z/zMarkTerminate.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.inline.hpp"
#include "gc/z/zThreadLocalAllocBuffer.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handshake.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ticks.hpp"

static const ZStatSubPhase ZSubPhaseConcurrentMark("Concurrent Mark");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryFlush("Concurrent Mark Try Flush");
static const ZStatSubPhase ZSubPhaseConcurrentMarkIdle("Concurrent Mark Idle");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryTerminate("Concurrent Mark Try Terminate");
static const ZStatSubPhase ZSubPhaseMarkTryComplete("Pause Mark Try Complete");

ZMark::ZMark(ZWorkers* workers, ZPageTable* page_table) :
    _workers(workers),
    _page_table(page_table),
    _allocator(),
    _stripes(),
    _terminate(),
    _work_terminateflush(true),
    _work_nproactiveflush(0),
    _work_nterminateflush(0),
    _nproactiveflush(0),
    _nterminateflush(0),
    _ntrycomplete(0),
    _ncontinue(0),
    _nworkers(0) {}

bool ZMark::is_initialized() const {
  return _allocator.is_initialized();
}

size_t ZMark::calculate_nstripes(uint nworkers) const {
  // Calculate the number of stripes from the number of workers we use,
  // where the number of stripes must be a power of two and we want to
  // have at least one worker per stripe.
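  // As an illustration (hypothetical numbers): 6 workers would round down
  // to 4 stripes, 8 workers map to 8 stripes, and very large worker counts
  // are capped by ZMarkStripesMax below.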
  const size_t nstripes = ZUtils::round_down_power_of_2(nworkers);
  return MIN2(nstripes, ZMarkStripesMax);
}

void ZMark::prepare_mark() {
  // Increment global sequence number to invalidate
  // marking information for all pages.
  ZGlobalSeqNum++;

  // Reset flush/continue counters
  _nproactiveflush = 0;
  _nterminateflush = 0;
  _ntrycomplete = 0;
  _ncontinue = 0;

  // Set number of workers to use
  _nworkers = _workers->nconcurrent();

  // Set number of mark stripes to use, based on number
  // of workers we will use in the concurrent mark phase.
  const size_t nstripes = calculate_nstripes(_nworkers);
  _stripes.set_nstripes(nstripes);

  // Update statistics
  ZStatMark::set_at_mark_start(nstripes);

  // Print worker/stripe distribution
  LogTarget(Debug, gc, marking) log;
  if (log.is_enabled()) {
    log.print("Mark Worker/Stripe Distribution");
    for (uint worker_id = 0; worker_id < _nworkers; worker_id++) {
      const ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, worker_id);
      const size_t stripe_id = _stripes.stripe_id(stripe);
      log.print("  Worker %u(%u) -> Stripe " SIZE_FORMAT "(" SIZE_FORMAT ")",
                worker_id, _nworkers, stripe_id, nstripes);
    }
  }
}

class ZMarkRootsIteratorClosure : public ZRootsIteratorClosure {
public:
  ZMarkRootsIteratorClosure() {
    ZThreadLocalAllocBuffer::reset_statistics();
  }

  ~ZMarkRootsIteratorClosure() {
    ZThreadLocalAllocBuffer::publish_statistics();
  }

  virtual void do_thread(Thread* thread) {
    // Update thread local address bad mask
    ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask);

    // Mark invisible root
    ZThreadLocalData::do_invisible_root(thread, ZBarrier::mark_barrier_on_invisible_root_oop_field);

    // Retire TLAB
    ZThreadLocalAllocBuffer::retire(thread);
  }

  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_root_oop_field(p);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZMarkRootsTask : public ZTask {
private:
  ZMark* const              _mark;
  ZRootsIterator            _roots;
  ZMarkRootsIteratorClosure _cl;

public:
  ZMarkRootsTask(ZMark* mark) :
      ZTask("ZMarkRootsTask"),
      _mark(mark),
      _roots(false /* visit_jvmti_weak_export */) {}

  virtual void work() {
    _roots.oops_do(&_cl);

    // Flush and free worker stacks. Needed here since
    // the set of workers executing during root scanning
    // can be different from the set of workers executing
    // during mark.
    _mark->flush_and_free();
  }
};

void ZMark::start() {
  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Prepare for concurrent mark
  prepare_mark();

  // Mark roots
  ZMarkRootsTask task(this);
  _workers->run_parallel(&task);
}

void ZMark::prepare_work() {
  assert(_nworkers == _workers->nconcurrent(), "Invalid number of workers");

  // Set number of active workers
  _terminate.reset(_nworkers);

  // Reset flush counters
  _work_nproactiveflush = _work_nterminateflush = 0;
  _work_terminateflush = true;
}

void ZMark::finish_work() {
  // Accumulate proactive/terminate flush counters
  _nproactiveflush += _work_nproactiveflush;
  _nterminateflush += _work_nterminateflush;
}

bool ZMark::is_array(uintptr_t addr) const {
  return ZOop::from_address(addr)->is_objArray();
}

void ZMark::push_partial_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(is_aligned(addr, ZMarkPartialArrayMinSize), "Address misaligned");
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());
  ZMarkStripe* const stripe = _stripes.stripe_for_addr(addr);
  const uintptr_t offset = ZAddress::offset(addr) >> ZMarkPartialArrayMinSizeShift;
  const uintptr_t length = size / oopSize;
  const ZMarkStackEntry entry(offset, length, finalizable);

  log_develop_trace(gc, marking)("Array push partial: " PTR_FORMAT " (" SIZE_FORMAT "), stripe: " SIZE_FORMAT,
                                 addr, size, _stripes.stripe_id(stripe));

  stacks->push(&_allocator, &_stripes, stripe, entry, false /* publish */);
}

void ZMark::follow_small_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= ZMarkPartialArrayMinSize, "Too large, should be split");
  const size_t length = size / oopSize;

  log_develop_trace(gc, marking)("Array follow small: " PTR_FORMAT " (" SIZE_FORMAT ")", addr, size);

  ZBarrier::mark_barrier_on_oop_array((oop*)addr, length, finalizable);
}

void ZMark::follow_large_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= (size_t)arrayOopDesc::max_array_length(T_OBJECT) * oopSize, "Too large");
  assert(size > ZMarkPartialArrayMinSize, "Too small, should not be split");
  const uintptr_t start = addr;
  const uintptr_t end = start + size;

  // Calculate the aligned middle start/end/size, where the middle start
  // should always be greater than the start (hence the +1 below) to make
  // sure we always do some follow work, not just split the array into pieces.
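  // For illustration only (hypothetical numbers, assuming a 4K
  // ZMarkPartialArrayMinSize): start = 0x1008 and size = 0x5000 give
  // middle_start = 0x2000, middle_size = 0x4000 and middle_end = 0x6000.
  // The leading [0x1008, 0x2000) part is followed directly below, while
  // the middle and trailing parts are pushed as partial array entries.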
  const uintptr_t middle_start = align_up(start + 1, ZMarkPartialArrayMinSize);
  const size_t middle_size = align_down(end - middle_start, ZMarkPartialArrayMinSize);
  const uintptr_t middle_end = middle_start + middle_size;

  log_develop_trace(gc, marking)("Array follow large: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT "), "
                                 "middle: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT ")",
                                 start, end, size, middle_start, middle_end, middle_size);

  // Push unaligned trailing part
  if (end > middle_end) {
    const uintptr_t trailing_addr = middle_end;
    const size_t trailing_size = end - middle_end;
    push_partial_array(trailing_addr, trailing_size, finalizable);
  }

  // Push aligned middle part(s)
  uintptr_t partial_addr = middle_end;
  while (partial_addr > middle_start) {
    const size_t parts = 2;
    const size_t partial_size = align_up((partial_addr - middle_start) / parts, ZMarkPartialArrayMinSize);
    partial_addr -= partial_size;
    push_partial_array(partial_addr, partial_size, finalizable);
  }

  // Follow leading part
  assert(start < middle_start, "Miscalculated middle start");
  const uintptr_t leading_addr = start;
  const size_t leading_size = middle_start - start;
  follow_small_array(leading_addr, leading_size, finalizable);
}

void ZMark::follow_array(uintptr_t addr, size_t size, bool finalizable) {
  if (size <= ZMarkPartialArrayMinSize) {
    follow_small_array(addr, size, finalizable);
  } else {
    follow_large_array(addr, size, finalizable);
  }
}

void ZMark::follow_partial_array(ZMarkStackEntry entry, bool finalizable) {
  const uintptr_t addr = ZAddress::good(entry.partial_array_offset() << ZMarkPartialArrayMinSizeShift);
  const size_t size = entry.partial_array_length() * oopSize;

  follow_array(addr, size, finalizable);
}

void ZMark::follow_array_object(objArrayOop obj, bool finalizable) {
  if (finalizable) {
    ZMarkBarrierOopClosure<true /* finalizable */> cl;
    cl.do_klass(obj->klass());
  } else {
    ZMarkBarrierOopClosure<false /* finalizable */> cl;
    cl.do_klass(obj->klass());
  }

  const uintptr_t addr = (uintptr_t)obj->base();
  const size_t size = (size_t)obj->length() * oopSize;

  follow_array(addr, size, finalizable);
}

void ZMark::follow_object(oop obj, bool finalizable) {
  if (finalizable) {
    ZMarkBarrierOopClosure<true /* finalizable */> cl;
    obj->oop_iterate(&cl);
  } else {
    ZMarkBarrierOopClosure<false /* finalizable */> cl;
    obj->oop_iterate(&cl);
  }
}

bool ZMark::try_mark_object(ZMarkCache* cache, uintptr_t addr, bool finalizable) {
  ZPage* const page = _page_table->get(addr);
  if (page->is_allocating()) {
    // Newly allocated objects are implicitly marked
    return false;
  }

  // Try mark object
  bool inc_live = false;
  const bool success = page->mark_object(addr, finalizable, inc_live);
  if (inc_live) {
    // Update live objects/bytes for page. We use the aligned object
    // size since that is the actual number of bytes used on the page
    // and alignment paddings can never be reclaimed.
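    // For example (hypothetical sizes), a 20-byte object on a page with
    // 32-byte object alignment would be accounted as 32 live bytes.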
    const size_t size = ZUtils::object_size(addr);
    const size_t aligned_size = align_up(size, page->object_alignment());
    cache->inc_live(page, aligned_size);
  }

  return success;
}

void ZMark::mark_and_follow(ZMarkCache* cache, ZMarkStackEntry entry) {
  // Decode flags
  const bool finalizable = entry.finalizable();
  const bool partial_array = entry.partial_array();

  if (partial_array) {
    follow_partial_array(entry, finalizable);
    return;
  }

  // Decode object address and follow flag
  const uintptr_t addr = entry.object_address();

  if (!try_mark_object(cache, addr, finalizable)) {
    // Already marked
    return;
  }

  if (is_array(addr)) {
    // Decode follow flag
    const bool follow = entry.follow();

    // The follow flag is currently only relevant for object arrays
    if (follow) {
      follow_array_object(objArrayOop(ZOop::from_address(addr)), finalizable);
    }
  } else {
    follow_object(ZOop::from_address(addr), finalizable);
  }
}

template <typename T>
bool ZMark::drain(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
  ZMarkStackEntry entry;

  // Drain stripe stacks
  while (stacks->pop(&_allocator, &_stripes, stripe, entry)) {
    mark_and_follow(cache, entry);

    // Check timeout
    if (timeout->has_expired()) {
      // Timeout
      return false;
    }
  }

  // Success
  return true;
}

template <typename T>
bool ZMark::drain_and_flush(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
  const bool success = drain(stripe, stacks, cache, timeout);

  // Flush and publish worker stacks
  stacks->flush(&_allocator, &_stripes);

  return success;
}

bool ZMark::try_steal(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  // Try to steal a stack from another stripe
  for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
       victim_stripe != stripe;
       victim_stripe = _stripes.stripe_next(victim_stripe)) {
    ZMarkStack* const stack = victim_stripe->steal_stack();
    if (stack != NULL) {
      // Success, install the stolen stack
      stacks->install(&_stripes, stripe, stack);
      return true;
    }
  }

  // Nothing to steal
  return false;
}

void ZMark::idle() const {
  ZStatTimer timer(ZSubPhaseConcurrentMarkIdle);
  os::naked_short_sleep(1);
}

class ZMarkFlushAndFreeStacksClosure : public ThreadClosure {
private:
  ZMark* const _mark;
  bool         _flushed;

public:
  ZMarkFlushAndFreeStacksClosure(ZMark* mark) :
      _mark(mark),
      _flushed(false) {}

  void do_thread(Thread* thread) {
    if (_mark->flush_and_free(thread)) {
      _flushed = true;
    }
  }

  bool flushed() const {
    return _flushed;
  }
};

class ZMarkFlushAndFreeStacksHandshake : public HandshakeOperation {
private:
  ZMarkFlushAndFreeStacksClosure* _cl;

public:
  ZMarkFlushAndFreeStacksHandshake(ZMarkFlushAndFreeStacksClosure* cl) :
      _cl(cl) {}

  const char* name() {
    return "ZMarkFlushAndFreeStacks";
  }

  void do_thread(JavaThread* jt) {
    _cl->do_thread(jt);
  }
};

bool ZMark::flush(bool at_safepoint) {
  ZMarkFlushAndFreeStacksClosure cl(this);
  if (at_safepoint) {
    Threads::threads_do(&cl);
  } else {
    ZMarkFlushAndFreeStacksHandshake zmf_hs(&cl);
    Handshake::execute(&zmf_hs);
  }

  // Returns true if more work is available
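  // (i.e. some thread-local stack was non-empty and got published, or the
  // shared stripes still hold unprocessed stacks)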
  return cl.flushed() || !_stripes.is_empty();
}

bool ZMark::try_flush(volatile size_t* nflush) {
  // Only flush if handshakes are enabled
  if (!ThreadLocalHandshakes) {
    return false;
  }

  Atomic::inc(nflush);

  ZStatTimer timer(ZSubPhaseConcurrentMarkTryFlush);
  return flush(false /* at_safepoint */);
}

bool ZMark::try_proactive_flush() {
  // Only do proactive flushes from worker 0
  if (ZThread::worker_id() != 0) {
    return false;
  }

  if (Atomic::load(&_work_nproactiveflush) == ZMarkProactiveFlushMax ||
      Atomic::load(&_work_nterminateflush) != 0) {
    // Limit reached or we're trying to terminate
    return false;
  }

  return try_flush(&_work_nproactiveflush);
}

bool ZMark::try_terminate() {
  ZStatTimer timer(ZSubPhaseConcurrentMarkTryTerminate);

  if (_terminate.enter_stage0()) {
    // Last thread entered stage 0, flush
    if (Atomic::load(&_work_terminateflush) &&
        Atomic::load(&_work_nterminateflush) != ZMarkTerminateFlushMax) {
      // Exit stage 0 to allow other threads to continue marking
      _terminate.exit_stage0();

      // Flush before termination
      if (!try_flush(&_work_nterminateflush)) {
        // No more work available, skip further flush attempts
        Atomic::store(false, &_work_terminateflush);
      }

      // Don't terminate, regardless of whether we successfully
      // flushed out more work or not. We've already exited
      // termination stage 0, to allow other threads to continue
      // marking, so this thread has to return false and also
      // make another round of attempted marking.
      return false;
    }
  }

  for (;;) {
    if (_terminate.enter_stage1()) {
      // Last thread entered stage 1, terminate
      return true;
    }

    // Idle to give the other threads
    // a chance to enter termination.
    idle();

    if (!_terminate.try_exit_stage1()) {
      // All workers in stage 1, terminate
      return true;
    }

    if (_terminate.try_exit_stage0()) {
      // More work available, don't terminate
      return false;
    }
  }
}

class ZMarkNoTimeout : public StackObj {
public:
  bool has_expired() {
    return false;
  }
};

void ZMark::work_without_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  ZStatTimer timer(ZSubPhaseConcurrentMark);
  ZMarkNoTimeout no_timeout;

  for (;;) {
    drain_and_flush(stripe, stacks, cache, &no_timeout);

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    if (try_proactive_flush()) {
      // Work available
      continue;
    }

    if (try_terminate()) {
      // Terminate
      break;
    }
  }
}

class ZMarkTimeout : public StackObj {
private:
  const Ticks    _start;
  const uint64_t _timeout;
  const uint64_t _check_interval;
  uint64_t       _check_at;
  uint64_t       _check_count;
  bool           _expired;

public:
  ZMarkTimeout(uint64_t timeout_in_millis) :
      _start(Ticks::now()),
      _timeout(_start.value() + TimeHelper::millis_to_counter(timeout_in_millis)),
      _check_interval(200),
      _check_at(_check_interval),
      _check_count(0),
      _expired(false) {}

  ~ZMarkTimeout() {
    const Tickspan duration = Ticks::now() - _start;
    log_debug(gc, marking)("Mark With Timeout (%s): %s, " UINT64_FORMAT " oops, %.3fms",
"Expired" : "Completed", 588 _check_count, TimeHelper::counter_to_millis(duration.value())); 589 } 590 591 bool has_expired() { 592 if (++_check_count == _check_at) { 593 _check_at += _check_interval; 594 if ((uint64_t)Ticks::now().value() >= _timeout) { 595 // Timeout 596 _expired = true; 597 } 598 } 599 600 return _expired; 601 } 602 }; 603 604 void ZMark::work_with_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, uint64_t timeout_in_millis) { 605 ZStatTimer timer(ZSubPhaseMarkTryComplete); 606 ZMarkTimeout timeout(timeout_in_millis); 607 608 for (;;) { 609 if (!drain_and_flush(stripe, stacks, cache, &timeout)) { 610 // Timed out 611 break; 612 } 613 614 if (try_steal(stripe, stacks)) { 615 // Stole work 616 continue; 617 } 618 619 // Terminate 620 break; 621 } 622 } 623 624 void ZMark::work(uint64_t timeout_in_millis) { 625 ZMarkCache cache(_stripes.nstripes()); 626 ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, ZThread::worker_id()); 627 ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current()); 628 629 if (timeout_in_millis == 0) { 630 work_without_timeout(&cache, stripe, stacks); 631 } else { 632 work_with_timeout(&cache, stripe, stacks, timeout_in_millis); 633 } 634 635 // Make sure stacks have been flushed 636 assert(stacks->is_empty(&_stripes), "Should be empty"); 637 638 // Free remaining stacks 639 stacks->free(&_allocator); 640 } 641 642 class ZMarkConcurrentRootsIteratorClosure : public ZRootsIteratorClosure { 643 public: 644 virtual void do_oop(oop* p) { 645 ZBarrier::mark_barrier_on_oop_field(p, false /* finalizable */); 646 } 647 648 virtual void do_oop(narrowOop* p) { 649 ShouldNotReachHere(); 650 } 651 }; 652 653 654 class ZMarkConcurrentRootsTask : public ZTask { 655 private: 656 SuspendibleThreadSetJoiner _sts_joiner; 657 ZConcurrentRootsIteratorClaimStrong _roots; 658 ZMarkConcurrentRootsIteratorClosure _cl; 659 660 public: 661 ZMarkConcurrentRootsTask(ZMark* mark) : 662 ZTask("ZMarkConcurrentRootsTask"), 663 _sts_joiner(), 664 _roots(), 665 _cl() { 666 ClassLoaderDataGraph_lock->lock(); 667 } 668 669 ~ZMarkConcurrentRootsTask() { 670 ClassLoaderDataGraph_lock->unlock(); 671 } 672 673 virtual void work() { 674 _roots.oops_do(&_cl); 675 } 676 }; 677 678 class ZMarkTask : public ZTask { 679 private: 680 ZMark* const _mark; 681 const uint64_t _timeout_in_millis; 682 683 public: 684 ZMarkTask(ZMark* mark, uint64_t timeout_in_millis = 0) : 685 ZTask("ZMarkTask"), 686 _mark(mark), 687 _timeout_in_millis(timeout_in_millis) { 688 _mark->prepare_work(); 689 } 690 691 ~ZMarkTask() { 692 _mark->finish_work(); 693 } 694 695 virtual void work() { 696 _mark->work(_timeout_in_millis); 697 } 698 }; 699 700 void ZMark::mark(bool initial) { 701 if (initial) { 702 ZMarkConcurrentRootsTask task(this); 703 _workers->run_concurrent(&task); 704 } 705 706 ZMarkTask task(this); 707 _workers->run_concurrent(&task); 708 } 709 710 bool ZMark::try_complete() { 711 _ntrycomplete++; 712 713 // Use nconcurrent number of worker threads to maintain the 714 // worker/stripe distribution used during concurrent mark. 715 ZMarkTask task(this, ZMarkCompleteTimeout); 716 _workers->run_concurrent(&task); 717 718 // Successful if all stripes are empty 719 return _stripes.is_empty(); 720 } 721 722 bool ZMark::try_end() { 723 // Flush all mark stacks 724 if (!flush(true /* at_safepoint */)) { 725 // Mark completed 726 return true; 727 } 728 729 // Try complete marking by doing a limited 730 // amount of mark work in this phase. 
  return try_complete();
}

bool ZMark::end() {
  // Try end marking
  if (!try_end()) {
    // Mark not completed
    _ncontinue++;
    return false;
  }

  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Update statistics
  ZStatMark::set_at_mark_end(_nproactiveflush, _nterminateflush, _ntrycomplete, _ncontinue);

  // Mark completed
  return true;
}

void ZMark::flush_and_free() {
  Thread* const thread = Thread::current();
  flush_and_free(thread);
}

bool ZMark::flush_and_free(Thread* thread) {
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
  const bool flushed = stacks->flush(&_allocator, &_stripes);
  stacks->free(&_allocator);
  return flushed;
}

class ZVerifyMarkStacksEmptyClosure : public ThreadClosure {
private:
  const ZMarkStripeSet* const _stripes;

public:
  ZVerifyMarkStacksEmptyClosure(const ZMarkStripeSet* stripes) :
      _stripes(stripes) {}

  void do_thread(Thread* thread) {
    ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
    guarantee(stacks->is_empty(_stripes), "Should be empty");
  }
};

void ZMark::verify_all_stacks_empty() const {
  // Verify thread stacks
  ZVerifyMarkStacksEmptyClosure cl(&_stripes);
  Threads::threads_do(&cl);

  // Verify stripe stacks
  guarantee(_stripes.is_empty(), "Should be empty");
}