/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zMarkCache.inline.hpp"
#include "gc/z/zMarkStack.inline.hpp"
#include "gc/z/zMarkTerminate.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.inline.hpp"
#include "gc/z/zThreadLocalAllocBuffer.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handshake.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/ticks.hpp"

static const ZStatSubPhase ZSubPhaseConcurrentMark("Concurrent Mark");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryFlush("Concurrent Mark Try Flush");
static const ZStatSubPhase ZSubPhaseConcurrentMarkIdle("Concurrent Mark Idle");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryTerminate("Concurrent Mark Try Terminate");
static const ZStatSubPhase ZSubPhaseMarkTryComplete("Pause Mark Try Complete");

ZMark::ZMark(ZWorkers* workers, ZPageTable* page_table) :
    _workers(workers),
    _page_table(page_table),
    _allocator(),
    _stripes(),
    _terminate(),
    _work_terminateflush(true),
    _work_nproactiveflush(0),
    _work_nterminateflush(0),
    _nproactiveflush(0),
    _nterminateflush(0),
    _ntrycomplete(0),
    _ncontinue(0),
    _nworkers(0) {}

bool ZMark::is_initialized() const {
  return _allocator.is_initialized();
}

size_t ZMark::calculate_nstripes(uint nworkers) const {
  // Calculate the number of stripes from the number of workers we use,
  // where the number of stripes must be a power of two and we want to
  // have at least one worker per stripe.
  const size_t nstripes = round_down_power_of_2(nworkers);
  return MIN2(nstripes, ZMarkStripesMax);
}

void ZMark::prepare_mark() {
  // Increment global sequence number to invalidate
  // marking information for all pages.
  ZGlobalSeqNum++;

  // Reset flush/continue counters
  _nproactiveflush = 0;
  _nterminateflush = 0;
  _ntrycomplete = 0;
  _ncontinue = 0;

  // Set number of workers to use
  _nworkers = _workers->nconcurrent();

  // Set number of mark stripes to use, based on number
  // of workers we will use in the concurrent mark phase.
  const size_t nstripes = calculate_nstripes(_nworkers);
  _stripes.set_nstripes(nstripes);

  // Update statistics
  ZStatMark::set_at_mark_start(nstripes);

  // Print worker/stripe distribution
  LogTarget(Debug, gc, marking) log;
  if (log.is_enabled()) {
    log.print("Mark Worker/Stripe Distribution");
    for (uint worker_id = 0; worker_id < _nworkers; worker_id++) {
      const ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, worker_id);
      const size_t stripe_id = _stripes.stripe_id(stripe);
      log.print(" Worker %u(%u) -> Stripe " SIZE_FORMAT "(" SIZE_FORMAT ")",
                worker_id, _nworkers, stripe_id, nstripes);
    }
  }
}

class ZMarkRootsIteratorClosure : public ZRootsIteratorClosure {
public:
  ZMarkRootsIteratorClosure() {
    ZThreadLocalAllocBuffer::reset_statistics();
  }

  ~ZMarkRootsIteratorClosure() {
    ZThreadLocalAllocBuffer::publish_statistics();
  }

  virtual void do_thread(Thread* thread) {
    // Update thread local address bad mask
    ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask);

    // Mark invisible root
    ZThreadLocalData::do_invisible_root(thread, ZBarrier::mark_barrier_on_invisible_root_oop_field);

    // Retire TLAB
    ZThreadLocalAllocBuffer::retire(thread);
  }

  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_root_oop_field(p);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZMarkRootsTask : public ZTask {
private:
  ZMark* const              _mark;
  ZRootsIterator            _roots;
  ZMarkRootsIteratorClosure _cl;

public:
  ZMarkRootsTask(ZMark* mark) :
      ZTask("ZMarkRootsTask"),
      _mark(mark),
      _roots(false /* visit_jvmti_weak_export */, true /* disarm_nmethods */) {}

  virtual void work() {
    _roots.oops_do(&_cl);

    // Flush and free worker stacks. Needed here since
    // the set of workers executing during root scanning
    // can be different from the set of workers executing
    // during mark.
    _mark->flush_and_free();
  }
};

void ZMark::start() {
  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Prepare for concurrent mark
  prepare_mark();

  // Mark roots
  ZMarkRootsTask task(this);
  _workers->run_parallel(&task);
}

void ZMark::prepare_work() {
  assert(_nworkers == _workers->nconcurrent(), "Invalid number of workers");

  // Set number of active workers
  _terminate.reset(_nworkers);

  // Reset flush counters
  _work_nproactiveflush = _work_nterminateflush = 0;
  _work_terminateflush = true;
}

void ZMark::finish_work() {
  // Accumulate proactive/terminate flush counters
  _nproactiveflush += _work_nproactiveflush;
  _nterminateflush += _work_nterminateflush;
}

bool ZMark::is_array(uintptr_t addr) const {
  return ZOop::from_address(addr)->is_objArray();
}

void ZMark::push_partial_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(is_aligned(addr, ZMarkPartialArrayMinSize), "Address misaligned");
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());
  ZMarkStripe* const stripe = _stripes.stripe_for_addr(addr);
  const uintptr_t offset = ZAddress::offset(addr) >> ZMarkPartialArrayMinSizeShift;
  const uintptr_t length = size / oopSize;
  const ZMarkStackEntry entry(offset, length, finalizable);

  log_develop_trace(gc, marking)("Array push partial: " PTR_FORMAT " (" SIZE_FORMAT "), stripe: " SIZE_FORMAT,
                                 addr, size, _stripes.stripe_id(stripe));

  stacks->push(&_allocator, &_stripes, stripe, entry, false /* publish */);
}

void ZMark::follow_small_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= ZMarkPartialArrayMinSize, "Too large, should be split");
  const size_t length = size / oopSize;

  log_develop_trace(gc, marking)("Array follow small: " PTR_FORMAT " (" SIZE_FORMAT ")", addr, size);

  ZBarrier::mark_barrier_on_oop_array((oop*)addr, length, finalizable);
}

void ZMark::follow_large_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= (size_t)arrayOopDesc::max_array_length(T_OBJECT) * oopSize, "Too large");
  assert(size > ZMarkPartialArrayMinSize, "Too small, should not be split");
  const uintptr_t start = addr;
  const uintptr_t end = start + size;

  // Calculate the aligned middle start/end/size, where the middle start
  // should always be greater than the start (hence the +1 below) to make
  // sure we always do some follow work, not just split the array into pieces.
  const uintptr_t middle_start = align_up(start + 1, ZMarkPartialArrayMinSize);
  const size_t middle_size = align_down(end - middle_start, ZMarkPartialArrayMinSize);
  const uintptr_t middle_end = middle_start + middle_size;

  log_develop_trace(gc, marking)("Array follow large: " PTR_FORMAT "-" PTR_FORMAT" (" SIZE_FORMAT "), "
                                 "middle: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT ")",
                                 start, end, size, middle_start, middle_end, middle_size);

  // Push unaligned trailing part
  if (end > middle_end) {
    const uintptr_t trailing_addr = middle_end;
    const size_t trailing_size = end - middle_end;
    push_partial_array(trailing_addr, trailing_size, finalizable);
  }

  // Push aligned middle part(s)
  uintptr_t partial_addr = middle_end;
  while (partial_addr > middle_start) {
    const size_t parts = 2;
    const size_t partial_size = align_up((partial_addr - middle_start) / parts, ZMarkPartialArrayMinSize);
    partial_addr -= partial_size;
    push_partial_array(partial_addr, partial_size, finalizable);
  }

  // Follow leading part
  assert(start < middle_start, "Miscalculated middle start");
  const uintptr_t leading_addr = start;
  const size_t leading_size = middle_start - start;
  follow_small_array(leading_addr, leading_size, finalizable);
}

void ZMark::follow_array(uintptr_t addr, size_t size, bool finalizable) {
  if (size <= ZMarkPartialArrayMinSize) {
    follow_small_array(addr, size, finalizable);
  } else {
    follow_large_array(addr, size, finalizable);
  }
}

void ZMark::follow_partial_array(ZMarkStackEntry entry, bool finalizable) {
  const uintptr_t addr = ZAddress::good(entry.partial_array_offset() << ZMarkPartialArrayMinSizeShift);
  const size_t size = entry.partial_array_length() * oopSize;

  follow_array(addr, size, finalizable);
}

void ZMark::follow_array_object(objArrayOop obj, bool finalizable) {
  if (finalizable) {
    ZMarkBarrierOopClosure<true /* finalizable */> cl;
    cl.do_klass(obj->klass());
  } else {
    ZMarkBarrierOopClosure<false /* finalizable */> cl;
    cl.do_klass(obj->klass());
  }

  const uintptr_t addr = (uintptr_t)obj->base();
  const size_t size = (size_t)obj->length() * oopSize;

  follow_array(addr, size, finalizable);
}

void ZMark::follow_object(oop obj, bool finalizable) {
  if (finalizable) {
    ZMarkBarrierOopClosure<true /* finalizable */> cl;
    obj->oop_iterate(&cl);
  } else {
    ZMarkBarrierOopClosure<false /* finalizable */> cl;
    obj->oop_iterate(&cl);
  }
}

bool ZMark::try_mark_object(ZMarkCache* cache, uintptr_t addr, bool finalizable) {
  ZPage* const page = _page_table->get(addr);
  if (page->is_allocating()) {
    // Newly allocated objects are implicitly marked
    return false;
  }

  // Try mark object
  bool inc_live = false;
  const bool success = page->mark_object(addr, finalizable, inc_live);
  if (inc_live) {
    // Update live objects/bytes for page. We use the aligned object
    // size since that is the actual number of bytes used on the page
    // and alignment paddings can never be reclaimed.
    const size_t size = ZUtils::object_size(addr);
    const size_t aligned_size = align_up(size, page->object_alignment());
    cache->inc_live(page, aligned_size);
  }

  return success;
}

void ZMark::mark_and_follow(ZMarkCache* cache, ZMarkStackEntry entry) {
  // Decode flags
  const bool finalizable = entry.finalizable();
  const bool partial_array = entry.partial_array();

  if (partial_array) {
    follow_partial_array(entry, finalizable);
    return;
  }

  // Decode object address and follow flag
  const uintptr_t addr = entry.object_address();

  if (!try_mark_object(cache, addr, finalizable)) {
    // Already marked
    return;
  }

  if (is_array(addr)) {
    // Decode follow flag
    const bool follow = entry.follow();

    // The follow flag is currently only relevant for object arrays
    if (follow) {
      follow_array_object(objArrayOop(ZOop::from_address(addr)), finalizable);
    }
  } else {
    follow_object(ZOop::from_address(addr), finalizable);
  }
}

template <typename T>
bool ZMark::drain(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
  ZMarkStackEntry entry;

  // Drain stripe stacks
  while (stacks->pop(&_allocator, &_stripes, stripe, entry)) {
    mark_and_follow(cache, entry);

    // Check timeout
    if (timeout->has_expired()) {
      // Timeout
      return false;
    }
  }

  // Success
  return true;
}

template <typename T>
bool ZMark::drain_and_flush(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
  const bool success = drain(stripe, stacks, cache, timeout);

  // Flush and publish worker stacks
  stacks->flush(&_allocator, &_stripes);

  return success;
}

bool ZMark::try_steal(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  // Try to steal a stack from another stripe
  for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
       victim_stripe != stripe;
       victim_stripe = _stripes.stripe_next(victim_stripe)) {
    ZMarkStack* const stack = victim_stripe->steal_stack();
    if (stack != NULL) {
      // Success, install the stolen stack
      stacks->install(&_stripes, stripe, stack);
      return true;
    }
  }

  // Nothing to steal
  return false;
}

void ZMark::idle() const {
  ZStatTimer timer(ZSubPhaseConcurrentMarkIdle);
  os::naked_short_sleep(1);
}

class ZMarkFlushAndFreeStacksClosure : public HandshakeClosure {
private:
  ZMark* const _mark;
  bool         _flushed;

public:
  ZMarkFlushAndFreeStacksClosure(ZMark* mark) :
      HandshakeClosure("ZMarkFlushAndFreeStacks"),
      _mark(mark),
      _flushed(false) {}

  void do_thread(Thread* thread) {
    if (_mark->flush_and_free(thread)) {
      _flushed = true;
    }
  }

  bool flushed() const {
    return _flushed;
  }
};

bool ZMark::flush(bool at_safepoint) {
  ZMarkFlushAndFreeStacksClosure cl(this);
  if (at_safepoint) {
    Threads::threads_do(&cl);
  } else {
    Handshake::execute(&cl);
  }

  // Returns true if more work is available
  return cl.flushed() || !_stripes.is_empty();
}

bool ZMark::try_flush(volatile size_t* nflush) {
  // Only flush if handshakes are enabled
  if (!ThreadLocalHandshakes) {
    return false;
  }

  Atomic::inc(nflush);

  ZStatTimer timer(ZSubPhaseConcurrentMarkTryFlush);
  return flush(false /* at_safepoint */);
}

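// Both proactive flushing (below) and the flushing done during termination in
// try_terminate() go through try_flush(), which handshakes the Java threads so
// that their thread-local mark stacks are published to the global stripes,
// making any remaining work visible to (and stealable by) the marking workers.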
bool ZMark::try_proactive_flush() {
  // Only do proactive flushes from worker 0
  if (ZThread::worker_id() != 0) {
    return false;
  }

  if (Atomic::load(&_work_nproactiveflush) == ZMarkProactiveFlushMax ||
      Atomic::load(&_work_nterminateflush) != 0) {
    // Limit reached or we're trying to terminate
    return false;
  }

  return try_flush(&_work_nproactiveflush);
}

bool ZMark::try_terminate() {
  ZStatTimer timer(ZSubPhaseConcurrentMarkTryTerminate);

  if (_terminate.enter_stage0()) {
    // Last thread entered stage 0, flush
    if (Atomic::load(&_work_terminateflush) &&
        Atomic::load(&_work_nterminateflush) != ZMarkTerminateFlushMax) {
      // Exit stage 0 to allow other threads to continue marking
      _terminate.exit_stage0();

      // Flush before termination
      if (!try_flush(&_work_nterminateflush)) {
        // No more work available, skip further flush attempts
        Atomic::store(&_work_terminateflush, false);
      }

      // Don't terminate, regardless of whether we successfully
      // flushed out more work or not. We've already exited
      // termination stage 0, to allow other threads to continue
      // marking, so this thread has to return false and also
      // make another round of attempted marking.
      return false;
    }
  }

  for (;;) {
    if (_terminate.enter_stage1()) {
      // Last thread entered stage 1, terminate
      return true;
    }

    // Idle to give the other threads
    // a chance to enter termination.
    idle();

    if (!_terminate.try_exit_stage1()) {
      // All workers in stage 1, terminate
      return true;
    }

    if (_terminate.try_exit_stage0()) {
      // More work available, don't terminate
      return false;
    }
  }
}

class ZMarkNoTimeout : public StackObj {
public:
  bool has_expired() {
    return false;
  }
};

void ZMark::work_without_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  ZStatTimer timer(ZSubPhaseConcurrentMark);
  ZMarkNoTimeout no_timeout;

  for (;;) {
    drain_and_flush(stripe, stacks, cache, &no_timeout);

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    if (try_proactive_flush()) {
      // Work available
      continue;
    }

    if (try_terminate()) {
      // Terminate
      break;
    }
  }
}

class ZMarkTimeout : public StackObj {
private:
  const Ticks    _start;
  const uint64_t _timeout;
  const uint64_t _check_interval;
  uint64_t       _check_at;
  uint64_t       _check_count;
  bool           _expired;

public:
  ZMarkTimeout(uint64_t timeout_in_millis) :
      _start(Ticks::now()),
      _timeout(_start.value() + TimeHelper::millis_to_counter(timeout_in_millis)),
      _check_interval(200),
      _check_at(_check_interval),
      _check_count(0),
      _expired(false) {}

  ~ZMarkTimeout() {
    const Tickspan duration = Ticks::now() - _start;
    log_debug(gc, marking)("Mark With Timeout (%s): %s, " UINT64_FORMAT " oops, %.3fms",
                           ZThread::name(), _expired ? "Expired" : "Completed",
                           _check_count, TimeHelper::counter_to_millis(duration.value()));
  }

  bool has_expired() {
    if (++_check_count == _check_at) {
      _check_at += _check_interval;
      if ((uint64_t)Ticks::now().value() >= _timeout) {
        // Timeout
        _expired = true;
      }
    }

    return _expired;
  }
};

void ZMark::work_with_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, uint64_t timeout_in_millis) {
  ZStatTimer timer(ZSubPhaseMarkTryComplete);
  ZMarkTimeout timeout(timeout_in_millis);

  for (;;) {
    if (!drain_and_flush(stripe, stacks, cache, &timeout)) {
      // Timed out
      break;
    }

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    // Terminate
    break;
  }
}

void ZMark::work(uint64_t timeout_in_millis) {
  ZMarkCache cache(_stripes.nstripes());
  ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, ZThread::worker_id());
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());

  if (timeout_in_millis == 0) {
    work_without_timeout(&cache, stripe, stacks);
  } else {
    work_with_timeout(&cache, stripe, stacks, timeout_in_millis);
  }

  // Make sure stacks have been flushed
  assert(stacks->is_empty(&_stripes), "Should be empty");

  // Free remaining stacks
  stacks->free(&_allocator);
}

class ZMarkConcurrentRootsIteratorClosure : public ZRootsIteratorClosure {
public:
  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_oop_field(p, false /* finalizable */);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZMarkConcurrentRootsTask : public ZTask {
private:
  SuspendibleThreadSetJoiner          _sts_joiner;
  ZConcurrentRootsIteratorClaimStrong _roots;
  ZMarkConcurrentRootsIteratorClosure _cl;

public:
  ZMarkConcurrentRootsTask(ZMark* mark) :
      ZTask("ZMarkConcurrentRootsTask"),
      _sts_joiner(),
      _roots(),
      _cl() {
    ClassLoaderDataGraph_lock->lock();
  }

  ~ZMarkConcurrentRootsTask() {
    ClassLoaderDataGraph_lock->unlock();
  }

  virtual void work() {
    _roots.oops_do(&_cl);
  }
};

class ZMarkTask : public ZTask {
private:
  ZMark* const   _mark;
  const uint64_t _timeout_in_millis;

public:
  ZMarkTask(ZMark* mark, uint64_t timeout_in_millis = 0) :
      ZTask("ZMarkTask"),
      _mark(mark),
      _timeout_in_millis(timeout_in_millis) {
    _mark->prepare_work();
  }

  ~ZMarkTask() {
    _mark->finish_work();
  }

  virtual void work() {
    _mark->work(_timeout_in_millis);
  }
};

void ZMark::mark(bool initial) {
  if (initial) {
    ZMarkConcurrentRootsTask task(this);
    _workers->run_concurrent(&task);
  }

  ZMarkTask task(this);
  _workers->run_concurrent(&task);
}

bool ZMark::try_complete() {
  _ntrycomplete++;

  // Use nconcurrent number of worker threads to maintain the
  // worker/stripe distribution used during concurrent mark.
  ZMarkTask task(this, ZMarkCompleteTimeout);
  _workers->run_concurrent(&task);

  // Successful if all stripes are empty
  return _stripes.is_empty();
}

bool ZMark::try_end() {
  // Flush all mark stacks
  if (!flush(true /* at_safepoint */)) {
    // Mark completed
    return true;
  }

  // Try complete marking by doing a limited
  // amount of mark work in this phase.
  return try_complete();
}

bool ZMark::end() {
  // Try end marking
  if (!try_end()) {
    // Mark not completed
    _ncontinue++;
    return false;
  }

  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Update statistics
  ZStatMark::set_at_mark_end(_nproactiveflush, _nterminateflush, _ntrycomplete, _ncontinue);

  // Mark completed
  return true;
}

void ZMark::flush_and_free() {
  Thread* const thread = Thread::current();
  flush_and_free(thread);
}

bool ZMark::flush_and_free(Thread* thread) {
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
  const bool flushed = stacks->flush(&_allocator, &_stripes);
  stacks->free(&_allocator);
  return flushed;
}

class ZVerifyMarkStacksEmptyClosure : public ThreadClosure {
private:
  const ZMarkStripeSet* const _stripes;

public:
  ZVerifyMarkStacksEmptyClosure(const ZMarkStripeSet* stripes) :
      _stripes(stripes) {}

  void do_thread(Thread* thread) {
    ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
    guarantee(stacks->is_empty(_stripes), "Should be empty");
  }
};

void ZMark::verify_all_stacks_empty() const {
  // Verify thread stacks
  ZVerifyMarkStacksEmptyClosure cl(&_stripes);
  Threads::threads_do(&cl);

  // Verify stripe stacks
  guarantee(_stripes.is_empty(), "Should be empty");
}