/*
 * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP

#include "gc/shared/markBitMap.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahAllocRequest.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
#include "gc/shenandoah/shenandoahSharedVariables.hpp"
#include "services/memoryManager.hpp"

class ConcurrentGCTimer;
class ReferenceProcessor;
class ShenandoahAllocTracker;
class ShenandoahCollectorPolicy;
class ShenandoahControlThread;
class ShenandoahGCSession;
class ShenandoahGCStateResetter;
class ShenandoahHeuristics;
class ShenandoahMarkingContext;
class ShenandoahMarkCompact;
class ShenandoahMode;
class ShenandoahPhaseTimings;
class ShenandoahHeap;
class ShenandoahHeapRegion;
class ShenandoahHeapRegionClosure;
class ShenandoahCollectionSet;
class ShenandoahFreeSet;
class ShenandoahConcurrentMark;
class ShenandoahMonitoringSupport;
class ShenandoahPacer;
class ShenandoahTraversalGC;
class ShenandoahVerifier;
class ShenandoahWorkGang;
class VMStructs;

class ShenandoahRegionIterator : public StackObj {
private:
  ShenandoahHeap* _heap;

  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
  volatile size_t _index;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);

  // No implicit copying: iterators should be passed by reference to capture the state
  ShenandoahRegionIterator(const ShenandoahRegionIterator& that);
  ShenandoahRegionIterator& operator=(const ShenandoahRegionIterator& o);

public:
  ShenandoahRegionIterator();
  ShenandoahRegionIterator(ShenandoahHeap* heap);

  // Reset iterator to default state
  void reset();

  // Returns next region, or NULL if there are no more regions.
  // This is multi-thread-safe.
  inline ShenandoahHeapRegion* next();

  // This is *not* MT safe. However, in the absence of multithreaded access, it
  // can be used to determine if there is more work to do.
  bool has_next() const;
};
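
// A minimal usage sketch (illustrative only, not part of this header's API):
// draining regions from one or more threads. Since next() is MT-safe, several
// workers can share one iterator and each region is handed out exactly once.
//
//   ShenandoahRegionIterator iter; // binds to ShenandoahHeap::heap()
//   for (ShenandoahHeapRegion* r = iter.next(); r != NULL; r = iter.next()) {
//     // process region r
//   }
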
class ShenandoahHeapRegionClosure : public StackObj {
public:
  virtual void heap_region_do(ShenandoahHeapRegion* r) = 0;
  virtual bool is_thread_safe() { return false; }
};

#ifdef ASSERT
class ShenandoahAssertToSpaceClosure : public OopClosure {
private:
  template <class T>
  void do_oop_work(T* p);
public:
  void do_oop(narrowOop* p);
  void do_oop(oop* p);
};
#endif

typedef ShenandoahLock    ShenandoahHeapLock;
typedef ShenandoahLocker  ShenandoahHeapLocker;

// Shenandoah GC is a low-pause concurrent GC that uses Brooks forwarding pointers
// to encode forwarding data. See BrooksPointer for details on forwarding data encoding.
// See ShenandoahControlThread for GC cycle structure.
//
class ShenandoahHeap : public CollectedHeap {
  friend class ShenandoahAsserts;
  friend class VMStructs;
  friend class ShenandoahGCSession;
  friend class ShenandoahGCStateResetter;

// ---------- Locks that guard important data structures in Heap
//
private:
  ShenandoahHeapLock _lock;

public:
  ShenandoahHeapLock* lock() {
    return &_lock;
  }

  void assert_heaplock_owned_by_current_thread()     NOT_DEBUG_RETURN;
  void assert_heaplock_not_owned_by_current_thread() NOT_DEBUG_RETURN;
  void assert_heaplock_or_safepoint()                NOT_DEBUG_RETURN;

// ---------- Initialization, termination, identification, printing routines
//
public:
  static ShenandoahHeap* heap();
  static ShenandoahHeap* heap_no_check();

  const char* name()          const { return "Shenandoah"; }
  ShenandoahHeap::Name kind() const { return CollectedHeap::Shenandoah; }

  ShenandoahHeap(ShenandoahCollectorPolicy* policy);
  jint initialize();
  void post_initialize();
  void initialize_heuristics();

  void initialize_serviceability();

  void print_on(outputStream* st)              const;
  void print_extended_on(outputStream *st)     const;
  void print_tracing_info()                    const;
  void print_gc_threads_on(outputStream* st)   const;
  void print_heap_regions_on(outputStream* st) const;

  void stop();

  void prepare_for_verify();
  void verify(VerifyOption vo);

// ---------- Heap counters and metrics
//
private:
  size_t _initial_size;
  size_t _minimum_size;
  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
  volatile size_t _used;
  volatile size_t _committed;
  volatile size_t _bytes_allocated_since_gc_start;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);

public:
  void increase_used(size_t bytes);
  void decrease_used(size_t bytes);
  void set_used(size_t bytes);

  void increase_committed(size_t bytes);
  void decrease_committed(size_t bytes);
  void increase_allocated(size_t bytes);

  size_t bytes_allocated_since_gc_start();
  void reset_bytes_allocated_since_gc_start();

  size_t min_capacity()     const;
  size_t max_capacity()     const;
  size_t initial_capacity() const;
  size_t capacity()         const;
  size_t used()             const;
  size_t committed()        const;

// ---------- Workers handling
//
private:
  uint _max_workers;
  ShenandoahWorkGang* _workers;
  ShenandoahWorkGang* _safepoint_workers;

public:
  uint max_workers();
  void assert_gc_workers(uint nworker) NOT_DEBUG_RETURN;

  WorkGang* workers() const;
  WorkGang* get_safepoint_workers();

  void gc_threads_do(ThreadClosure* tcl) const;
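
  // Illustrative sketch (hypothetical task type, not declared here): parallel
  // GC work is dispatched on the worker gang in the usual HotSpot fashion:
  //
  //   ShenandoahHeap* heap = ShenandoahHeap::heap();
  //   SomeGangTask task;                 // an AbstractGangTask subclass
  //   heap->workers()->run_task(&task);  // runs task.work(worker_id) on each worker
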
// ---------- Heap regions handling machinery
//
private:
  MemRegion _heap_region;
  bool      _heap_region_special;
  size_t    _num_regions;
  ShenandoahHeapRegion** _regions;
  ShenandoahRegionIterator _update_refs_iterator;

public:
  inline size_t num_regions() const { return _num_regions; }
  inline bool is_heap_region_special() { return _heap_region_special; }

  inline ShenandoahHeapRegion* const heap_region_containing(const void* addr) const;
  inline size_t heap_region_index_containing(const void* addr) const;

  inline ShenandoahHeapRegion* const get_region(size_t region_idx) const;

  void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
  void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;

// ---------- GC state machinery
//
// GC state describes the important parts of collector state that may be
// used to make barrier selection decisions in the native and generated code.
// Multiple bits can be set at once.
//
// Important invariant: when GC state is zero, the heap is stable, and no barriers
// are required.
//
public:
  enum GCStateBitPos {
    // Heap has forwarded objects: needs LRB barriers.
    HAS_FORWARDED_BITPOS = 0,

    // Heap is under marking: needs SATB barriers.
    MARKING_BITPOS       = 1,

    // Heap is under evacuation: needs LRB barriers. (Set together with HAS_FORWARDED)
    EVACUATION_BITPOS    = 2,

    // Heap is under updating: needs no additional barriers.
    UPDATEREFS_BITPOS    = 3,

    // Heap is under traversal collection
    TRAVERSAL_BITPOS     = 4
  };

  enum GCState {
    STABLE        = 0,
    HAS_FORWARDED = 1 << HAS_FORWARDED_BITPOS,
    MARKING       = 1 << MARKING_BITPOS,
    EVACUATION    = 1 << EVACUATION_BITPOS,
    UPDATEREFS    = 1 << UPDATEREFS_BITPOS,
    TRAVERSAL     = 1 << TRAVERSAL_BITPOS
  };

private:
  ShenandoahSharedBitmap _gc_state;
  ShenandoahSharedFlag   _degenerated_gc_in_progress;
  ShenandoahSharedFlag   _full_gc_in_progress;
  ShenandoahSharedFlag   _full_gc_move_in_progress;
  ShenandoahSharedFlag   _progress_last_gc;

  void set_gc_state_all_threads(char state);
  void set_gc_state_mask(uint mask, bool value);

public:
  char gc_state() const;
  static address gc_state_addr();

  void set_concurrent_mark_in_progress(bool in_progress);
  void set_evacuation_in_progress(bool in_progress);
  void set_update_refs_in_progress(bool in_progress);
  void set_degenerated_gc_in_progress(bool in_progress);
  void set_full_gc_in_progress(bool in_progress);
  void set_full_gc_move_in_progress(bool in_progress);
  void set_concurrent_traversal_in_progress(bool in_progress);
  void set_has_forwarded_objects(bool cond);

  inline bool is_stable() const;
  inline bool is_idle() const;
  inline bool is_concurrent_mark_in_progress() const;
  inline bool is_update_refs_in_progress() const;
  inline bool is_evacuation_in_progress() const;
  inline bool is_degenerated_gc_in_progress() const;
  inline bool is_full_gc_in_progress() const;
  inline bool is_full_gc_move_in_progress() const;
  inline bool is_concurrent_traversal_in_progress() const;
  inline bool has_forwarded_objects() const;
  inline bool is_gc_in_progress_mask(uint mask) const;
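
  // Illustrative sketch of how the state bitmap drives barrier selection
  // (hypothetical call site; it follows directly from the invariant above):
  //
  //   char state = ShenandoahHeap::heap()->gc_state();
  //   if (state == STABLE) {
  //     // heap is stable: no barriers required
  //   } else if ((state & MARKING) != 0) {
  //     // marking is active: SATB barrier must record overwritten references
  //   }
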
// ---------- GC cancellation and degeneration machinery
//
// Cancelled GC flag is used to notify concurrent phases that they should terminate.
//
public:
  enum ShenandoahDegenPoint {
    _degenerated_unset,
    _degenerated_traversal,
    _degenerated_outside_cycle,
    _degenerated_mark,
    _degenerated_evac,
    _degenerated_updaterefs,
    _DEGENERATED_LIMIT
  };

  static const char* degen_point_to_string(ShenandoahDegenPoint point) {
    switch (point) {
      case _degenerated_unset:
        return "<UNSET>";
      case _degenerated_traversal:
        return "Traversal";
      case _degenerated_outside_cycle:
        return "Outside of Cycle";
      case _degenerated_mark:
        return "Mark";
      case _degenerated_evac:
        return "Evacuation";
      case _degenerated_updaterefs:
        return "Update Refs";
      default:
        ShouldNotReachHere();
        return "ERROR";
    }
  }

private:
  enum CancelState {
    // Normal state. GC has not been cancelled and is open for cancellation.
    // Worker threads can suspend for safepoint.
    CANCELLABLE,

    // GC has been cancelled. Worker threads can not suspend for
    // safepoint but must finish their work as soon as possible.
    CANCELLED,

    // GC has not been cancelled and must not be cancelled. At least
    // one worker thread checks for pending safepoint and may suspend
    // if a safepoint is pending.
    NOT_CANCELLED
  };

  ShenandoahSharedEnumFlag<CancelState> _cancelled_gc;
  inline bool try_cancel_gc();

public:
  static address cancelled_gc_addr();

  inline bool cancelled_gc() const;
  inline bool check_cancelled_gc_and_yield(bool sts_active = true);

  inline void clear_cancelled_gc();

  void cancel_gc(GCCause::Cause cause);

// ---------- GC operations entry points
//
public:
  // Entry points to STW GC operations: these cause a related safepoint, which then
  // calls the matching entry method below.
  void vmop_entry_init_mark();
  void vmop_entry_final_mark();
  void vmop_entry_final_evac();
  void vmop_entry_init_updaterefs();
  void vmop_entry_final_updaterefs();
  void vmop_entry_init_traversal();
  void vmop_entry_final_traversal();
  void vmop_entry_full(GCCause::Cause cause);
  void vmop_degenerated(ShenandoahDegenPoint point);

  // Entry methods to normally STW GC operations. These set up logging, monitoring
  // and workers for the actual VM operation.
  void entry_init_mark();
  void entry_final_mark();
  void entry_final_evac();
  void entry_init_updaterefs();
  void entry_final_updaterefs();
  void entry_init_traversal();
  void entry_final_traversal();
  void entry_full(GCCause::Cause cause);
  void entry_degenerated(int point);
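
  // Control-flow sketch for one STW phase (illustrative; the VM operation name
  // is assumed here, see the Shenandoah VM operations for the actual classes):
  //
  //   vmop_entry_init_mark()     // requests the safepoint (e.g. VM_ShenandoahInitMark)
  //     -> entry_init_mark()     // sets up logging, monitoring, workers
  //       -> op_init_mark()      // does the actual phase work
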
  // Entry methods to normally concurrent GC operations. These set up logging
  // and monitoring for the concurrent operation.
  void entry_reset();
  void entry_mark();
  void entry_preclean();
  void entry_cleanup();
  void entry_evac();
  void entry_updaterefs();
  void entry_traversal();
  void entry_uncommit(double shrink_before);

private:
  // Actual work for the phases
  void op_init_mark();
  void op_final_mark();
  void op_final_evac();
  void op_init_updaterefs();
  void op_final_updaterefs();
  void op_init_traversal();
  void op_final_traversal();
  void op_full(GCCause::Cause cause);
  void op_degenerated(ShenandoahDegenPoint point);
  void op_degenerated_fail();
  void op_degenerated_futile();

  void op_reset();
  void op_mark();
  void op_preclean();
  void op_cleanup();
  void op_conc_evac();
  void op_stw_evac();
  void op_updaterefs();
  void op_traversal();
  void op_uncommit(double shrink_before);

  // Messages for GC trace events; they have to be immortal because they are
  // passed around the logging/tracing systems.
  const char* init_mark_event_message() const;
  const char* final_mark_event_message() const;
  const char* conc_mark_event_message() const;
  const char* init_traversal_event_message() const;
  const char* final_traversal_event_message() const;
  const char* conc_traversal_event_message() const;
  const char* degen_event_message(ShenandoahDegenPoint point) const;

// ---------- GC subsystems
//
private:
  ShenandoahControlThread*   _control_thread;
  ShenandoahCollectorPolicy* _shenandoah_policy;
  ShenandoahMode*            _gc_mode;
  ShenandoahHeuristics*      _heuristics;
  ShenandoahFreeSet*         _free_set;
  ShenandoahConcurrentMark*  _scm;
  ShenandoahTraversalGC*     _traversal_gc;
  ShenandoahMarkCompact*     _full_gc;
  ShenandoahPacer*           _pacer;
  ShenandoahVerifier*        _verifier;

  ShenandoahAllocTracker*    _alloc_tracker;
  ShenandoahPhaseTimings*    _phase_timings;

  ShenandoahControlThread*   control_thread() { return _control_thread; }
  ShenandoahMarkCompact*     full_gc()        { return _full_gc; }

public:
  ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; }
  ShenandoahHeuristics*      heuristics()        const { return _heuristics; }
  ShenandoahFreeSet*         free_set()          const { return _free_set; }
  ShenandoahConcurrentMark*  concurrent_mark()         { return _scm; }
  ShenandoahTraversalGC*     traversal_gc()      const { return _traversal_gc; }
  bool                       is_traversal_mode() const { return _traversal_gc != NULL; }
  ShenandoahPacer*           pacer()             const { return _pacer; }

  ShenandoahPhaseTimings*    phase_timings()     const { return _phase_timings; }
  ShenandoahAllocTracker*    alloc_tracker()     const { return _alloc_tracker; }

  ShenandoahVerifier* verifier();
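
  // Illustrative (hypothetical call site): subsystems are reached through the
  // singleton heap, and traversal-specific code is guarded by the mode check:
  //
  //   ShenandoahHeap* heap = ShenandoahHeap::heap();
  //   if (heap->is_traversal_mode()) {
  //     ShenandoahTraversalGC* tgc = heap->traversal_gc();
  //     // ... traversal-only work ...
  //   }
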
// ---------- VM subsystem bindings
//
private:
  ShenandoahMonitoringSupport* _monitoring_support;
  MemoryPool*                  _memory_pool;
  GCMemoryManager              _stw_memory_manager;
  GCMemoryManager              _cycle_memory_manager;
  ConcurrentGCTimer*           _gc_timer;
  SoftRefPolicy                _soft_ref_policy;

public:
  ShenandoahMonitoringSupport* monitoring_support() { return _monitoring_support; }
  GCMemoryManager* cycle_memory_manager()           { return &_cycle_memory_manager; }
  GCMemoryManager* stw_memory_manager()             { return &_stw_memory_manager; }
  SoftRefPolicy* soft_ref_policy()                  { return &_soft_ref_policy; }

  GrowableArray<GCMemoryManager*> memory_managers();
  GrowableArray<MemoryPool*> memory_pools();
  MemoryUsage memory_usage();
  GCTracer* tracer();
  GCTimer* gc_timer() const;
  CollectorPolicy* collector_policy() const;

// ---------- Reference processing
//
private:
  AlwaysTrueClosure    _subject_to_discovery;
  ReferenceProcessor*  _ref_processor;
  ShenandoahSharedFlag _process_references;

  void ref_processing_init();

public:
  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void set_process_references(bool pr);
  bool process_references() const;

// ---------- Class Unloading
//
private:
  ShenandoahSharedFlag _unload_classes;

public:
  void set_unload_classes(bool uc);
  bool unload_classes() const;

  // Delete entries for dead interned strings and clean up unreferenced symbols
  // in the symbol table, possibly in parallel.
  void unload_classes_and_cleanup_tables(bool full_gc);

// ---------- Generic interface hooks
// Minor things that the super-interface expects us to implement to play nice with
// the rest of the runtime. Some of the things here are not required to be implemented,
// and can be stubbed out.
//
public:
  AdaptiveSizePolicy* size_policy() shenandoah_not_implemented_return(NULL);
  bool is_maximal_no_gc() const shenandoah_not_implemented_return(false);

  bool is_in(const void* p) const;

  // All objects can potentially move
  bool is_scavengable(oop obj) { return true; }

  void collect(GCCause::Cause cause);
  void do_full_collection(bool clear_all_soft_refs);

  // Used for parsing heap during error printing
  HeapWord* block_start(const void* addr) const;
  size_t block_size(const HeapWord* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  // Used for native heap walkers: heap dumpers, mostly
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl);

  // Used by RMI
  jlong millis_since_last_gc();

// ---------- Safepoint interface hooks
//
public:
  void safepoint_synchronize_begin();
  void safepoint_synchronize_end();

// ---------- Code roots handling hooks
//
public:
  void register_nmethod(nmethod* nm);
  void unregister_nmethod(nmethod* nm);

// ---------- Pinning hooks
//
public:
  // Shenandoah supports per-object (per-region) pinning
  bool supports_object_pinning() const { return true; }

  oop pin_object(JavaThread* thread, oop obj);
  void unpin_object(JavaThread* thread, oop obj);

  void sync_pinned_region_status();
  void assert_pinned_region_status() NOT_DEBUG_RETURN;
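
  // Illustrative sketch (hypothetical call site, e.g. a JNI critical section):
  // pinning keeps the region holding obj from being evacuated until the
  // matching unpin:
  //
  //   oop p = heap->pin_object(thread, obj);
  //   // ... raw access to p's payload; p will not move ...
  //   heap->unpin_object(thread, p);
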
// ---------- Allocation support
//
private:
  HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region);
  inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);
  HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
  HeapWord* allocate_new_gclab(size_t min_size, size_t word_size, size_t* actual_size);
  void retire_and_reset_gclabs();

public:
  HeapWord* allocate_memory(ShenandoahAllocRequest& request);
  HeapWord* mem_allocate(size_t size, bool* what);
  MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                               size_t size,
                                               Metaspace::MetadataType mdtype);

  void notify_mutator_alloc_words(size_t words, bool waste);

  // Shenandoah supports TLAB allocation
  bool supports_tlab_allocation() const { return true; }

  HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size);
  size_t tlab_capacity(Thread *thr) const;
  size_t unsafe_max_tlab_alloc(Thread *thread) const;
  size_t max_tlab_size() const;
  size_t tlab_used(Thread* ignored) const;

  void accumulate_statistics_tlabs();
  void resize_tlabs();

  void ensure_parsability(bool retire_tlabs);
  void make_parsable(bool retire_tlabs);

// ---------- Marking support
//
private:
  ShenandoahMarkingContext* _marking_context;
  MemRegion  _bitmap_region;
  MemRegion  _aux_bitmap_region;
  MarkBitMap _verification_bit_map;
  MarkBitMap _aux_bit_map;

  size_t _bitmap_size;
  size_t _bitmap_regions_per_slice;
  size_t _bitmap_bytes_per_slice;

  bool _bitmap_region_special;
  bool _aux_bitmap_region_special;

  // Used for buffering per-region liveness data.
  // Needed since ShenandoahHeapRegion uses atomics to update liveness.
  //
  // The array has max-workers elements, each of which is an array of
  // max_regions jushorts. The choice of jushort is not accidental:
  // there is a tradeoff between static/dynamic footprint that translates
  // into cache pressure (which is already high during marking), and
  // too many atomic updates. size_t/jint is too large, jbyte is too small.
  // (For example, 16 workers with 32768 regions cost 16 * 32768 * 2 bytes = 1 MB.)
  jushort** _liveness_cache;

public:
  inline ShenandoahMarkingContext* complete_marking_context() const;
  inline ShenandoahMarkingContext* marking_context() const;
  inline void mark_complete_marking_context();
  inline void mark_incomplete_marking_context();

  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);

  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);

  template<class T>
  inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);

  void reset_mark_bitmap();

  // SATB barriers hooks
  inline bool requires_marking(const void* entry) const;
  void force_satb_flush_all_threads();

  // Support for bitmap uncommits
  bool commit_bitmap_slice(ShenandoahHeapRegion *r);
  bool uncommit_bitmap_slice(ShenandoahHeapRegion *r);
  bool is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self = false);

  // Liveness caching support
  jushort* get_liveness_cache(uint worker_id);
  void flush_liveness_cache(uint worker_id);

// ---------- Evacuation support
//
private:
  ShenandoahCollectionSet* _collection_set;
  ShenandoahEvacOOMHandler _oom_evac_handler;

  void evacuate_and_update_roots();

public:
  static address in_cset_fast_test_addr();

  ShenandoahCollectionSet* collection_set() const { return _collection_set; }

  template <class T>
  inline bool in_collection_set(T obj) const;

  // Avoid accidentally calling the method above with ShenandoahHeapRegion*, which would be *wrong*.
  inline bool in_collection_set(ShenandoahHeapRegion* r) shenandoah_not_implemented_return(false);

  // Evacuates object src. Returns the evacuated object, either evacuated
  // by this thread, or by some other thread.
  inline oop evacuate_object(oop src, Thread* thread);
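
  // Illustrative evacuation sequence (hypothetical call site; enter/leave are
  // declared right below and bracket the evacuation OOM protocol):
  //
  //   heap->enter_evacuation();
  //   oop copy = heap->evacuate_object(obj, Thread::current());
  //   heap->leave_evacuation();
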
  // Call before/after evacuation.
  void enter_evacuation();
  void leave_evacuation();

// ---------- Helper functions
//
public:
  template <class T>
  inline oop evac_update_with_forwarded(T* p);

  template <class T>
  inline oop maybe_update_with_forwarded(T* p);

  template <class T>
  inline oop maybe_update_with_forwarded_not_null(T* p, oop obj);

  template <class T>
  inline oop update_with_forwarded_not_null(T* p, oop obj);

  static inline oop cas_oop(oop n, narrowOop* addr, oop c);
  static inline oop cas_oop(oop n, oop* addr, oop c);
  static inline oop cas_oop(oop n, narrowOop* addr, narrowOop c);

  void trash_humongous_region_at(ShenandoahHeapRegion *r);

  void deduplicate_string(oop str);

  void stop_concurrent_marking();

private:
  void trash_cset_regions();
  void update_heap_references(bool concurrent);

// ---------- Testing helper functions
//
private:
  ShenandoahSharedFlag _inject_alloc_failure;

  void try_inject_alloc_failure();
  bool should_inject_alloc_failure();
};

#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP