/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP

#include "gc/shared/markBitMap.hpp"
#include "gc/shenandoah/shenandoahHeapLock.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"

class ConcurrentGCTimer;

// Forward declarations of Shenandoah components; their full definitions
// live in the corresponding shenandoah*.hpp headers.
class ShenandoahAllocTracker;
class ShenandoahCollectorPolicy;
class ShenandoahConnectionMatrix;
class ShenandoahPhaseTimings;
class ShenandoahHeapRegion;
class ShenandoahHeapRegionClosure;
class ShenandoahHeapRegionSet;
class ShenandoahCollectionSet;
class ShenandoahFreeSet;
class ShenandoahConcurrentMark;
class ShenandoahPartialGC;
class ShenandoahVerifier;
class ShenandoahConcurrentThread;
class ShenandoahMonitoringSupport;

// OopClosure that updates the oop/narrowOop references it visits. The
// actual update logic is in the templated do_oop_work(), whose inline
// definition is out-of-line and not visible in this header.
class ShenandoahUpdateRefsClosure: public OopClosure {
private:
  ShenandoahHeap* _heap;

  template <class T>
  inline void do_oop_work(T* p);

public:
  ShenandoahUpdateRefsClosure();
  inline void do_oop(oop* p);
  inline void do_oop(narrowOop* p);
};

#ifdef ASSERT
// Debug-only closure; implementation is out-of-line. Presumably asserts
// that every visited reference points into to-space -- confirm against
// the .cpp before relying on this.
class ShenandoahAssertToSpaceClosure : public OopClosure {
private:
  template <class T>
  void do_oop_nv(T* p);
public:
  void do_oop(narrowOop* p);
  void do_oop(oop* p);
};
#endif

// Trivial liveness predicate that reports every object as alive.
class ShenandoahAlwaysTrueClosure : public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};

// Liveness predicate for objects that may be forwarded; do_object_b() is
// defined out-of-line. init() must be called with the heap before use --
// NOTE(review): "Forwarded" presumably refers to Brooks forwarding
// pointers; confirm in the implementation.
class ShenandoahForwardedIsAliveClosure: public BoolObjectClosure {
private:
  ShenandoahHeap* _heap;
public:
  ShenandoahForwardedIsAliveClosure();
  void init(ShenandoahHeap* heap) {
    _heap = heap;
  }
  bool do_object_b(oop obj);
};

// Liveness predicate (non-forwarding variant); do_object_b() is defined
// out-of-line. init() must be called with the heap before use.
class ShenandoahIsAliveClosure: public BoolObjectClosure {
private:
  ShenandoahHeap* _heap;
public:
  ShenandoahIsAliveClosure();
  void init(ShenandoahHeap* heap) {
    _heap = heap;
  }
  bool do_object_b(oop obj);
};


// A "ShenandoahHeap" is an implementation of a java heap for HotSpot.
// It uses a new pauseless GC algorithm based on Brooks pointers.
// Derived from G1
//
// CollectedHeap
//   SharedHeap
//     ShenandoahHeap
//
// NOTE(review): the hierarchy comment above looks stale -- the class below
// derives directly from CollectedHeap, with no SharedHeap in between.

class ShenandoahHeap : public CollectedHeap {
  // Tri-state GC-cancellation machine, stored in _cancelled_concgc.
  enum CancelState {

    // Normal state. GC has not been cancelled and is open for cancellation.
    // Worker threads can suspend for safepoint.
    CANCELLABLE,

    // GC has been cancelled. Worker threads can not suspend for
    // safepoint but must finish their work as soon as possible.
    CANCELLED,

    // GC has not been cancelled and must not be cancelled. At least
    // one worker thread checks for pending safepoint and may suspend
    // if a safepoint is pending.
    NOT_CANCELLED

  };

public:
  // Reason a concurrent GC cycle was cancelled; consumed by
  // cancel_concgc(ShenandoahCancelCause) and cancel_cause_to_string().
  enum ShenandoahCancelCause {
    _oom_evacuation,
    _vm_stop,
  };
private:
  ShenandoahHeapLock _lock;
  ShenandoahCollectorPolicy* _shenandoah_policy;
  size_t _bitmap_size;
  size_t _bitmap_words_per_region;
  MemRegion _heap_region;
  MemRegion _bitmap0_region;           // backing storage for _mark_bit_map0
  MemRegion _bitmap1_region;           // backing storage for _mark_bit_map1

  // Sortable array of regions
  ShenandoahHeapRegionSet* _ordered_regions;
  ShenandoahFreeSet* _free_regions;
  ShenandoahCollectionSet* _collection_set;

  ShenandoahConcurrentMark* _scm;
  ShenandoahPartialGC* _partial_gc;
  ShenandoahVerifier* _verifier;

  ShenandoahConcurrentThread* _concurrent_gc_thread;

  ShenandoahMonitoringSupport* _monitoring_support;

  ShenandoahPhaseTimings* _phase_timings;
  ShenandoahAllocTracker* _alloc_tracker;

  size_t _num_regions;
  size_t _initial_size;

  // Worker gangs: _workers for concurrent/parallel GC work,
  // _safepoint_workers exposed via get_safepoint_workers().
  uint _max_workers;
  ShenandoahWorkGang* _workers;
  ShenandoahWorkGang* _safepoint_workers;

  volatile size_t _used;
  volatile size_t _committed;

  // Two mark bitmaps are kept and swapped (see swap_mark_bitmaps());
  // _complete_* refers to the finished marking, _next_* to the one in
  // progress.
  MarkBitMap _verification_bit_map;
  MarkBitMap _mark_bit_map0;
  MarkBitMap _mark_bit_map1;
  MarkBitMap* _complete_mark_bit_map;
  MarkBitMap* _next_mark_bit_map;

  // Per-region top-at-mark-start (TAMS) arrays, indexed by region base
  // address; *_base points at the raw allocation. Used by the
  // allocated_after_*_mark_start() predicates below.
  HeapWord** _complete_top_at_mark_starts;
  HeapWord** _complete_top_at_mark_starts_base;

  HeapWord** _next_top_at_mark_starts;
  HeapWord** _next_top_at_mark_starts_base;

  // Holds a CancelState value; address exported to generated code via
  // cancelled_concgc_addr().
  volatile jbyte _cancelled_concgc;

  size_t _bytes_allocated_since_cm;
  size_t _bytes_allocated_during_cm;
  size_t _allocated_last_gc;
  size_t _used_start_gc;

  // char (not bool): address is exported to generated code via
  // concurrent_mark_in_progress_addr().
  char _concurrent_mark_in_progress;

  bool _full_gc_in_progress;
  bool _update_refs_in_progress;
  bool _concurrent_partial_in_progress;

  // unsigned int (not bool): address is exported via
  // evacuation_in_progress_addr().
  unsigned int _evacuation_in_progress;
  bool _need_update_refs;
  bool _need_reset_bitmaps;

  ReferenceProcessor* _ref_processor;

  ShenandoahForwardedIsAliveClosure _forwarded_is_alive;
  ShenandoahIsAliveClosure _is_alive;

  ConcurrentGCTimer* _gc_timer;

  // See allocate_memory()
  volatile jbyte _heap_lock;

  ShenandoahConnectionMatrix* _connection_matrix;

#ifdef ASSERT
  Thread* volatile _heap_lock_owner;
  int _heap_expansion_count;
#endif

public:
  ShenandoahHeap(ShenandoahCollectorPolicy* policy);

  // ---- CollectedHeap interface (marked /* override */ in pre-C++11
  // HotSpot style) ----
  const char* name() const /* override */;
  HeapWord* allocate_new_tlab(size_t word_size) /* override */;
  void print_on(outputStream* st) const /* override */;
  void print_extended_on(outputStream *st) const /* override */;

  ShenandoahHeap::Name kind() const /* override */{
    return CollectedHeap::ShenandoahHeap;
  }

  jint initialize() /* override */;
  void post_initialize() /* override */;
  size_t capacity() const /* override */;
  size_t used() const /* override */;
  size_t committed() const;
  bool is_maximal_no_gc() const /* override */;
  size_t max_capacity() const /* override */;
  size_t initial_capacity() const /* override */;
  bool is_in(const void* p) const /* override */;
  bool is_scavengable(const void* addr) /* override */;
  // NOTE(review): parameter name 'what' is uninformative; the base
  // CollectedHeap declaration calls it gc_overhead_limit_was_exceeded --
  // confirm and consider renaming for clarity.
  HeapWord* mem_allocate(size_t size, bool* what) /* override */;
  bool can_elide_tlab_store_barriers() const /* override */;
  oop new_store_pre_barrier(JavaThread* thread, oop new_obj) /* override */;
  bool can_elide_initializing_store_barrier(oop new_obj) /* override */;
  bool card_mark_must_follow_store() const /* override */;
  void collect(GCCause::Cause cause) /* override */;
  void do_full_collection(bool clear_all_soft_refs) /* override */;
  AdaptiveSizePolicy* size_policy() /* override */;
  CollectorPolicy* collector_policy() const /* override */;
  void ensure_parsability(bool retire_tlabs) /* override */;
  HeapWord* block_start(const void* addr) const /* override */;
  size_t block_size(const HeapWord* addr) const /* override */;
  bool block_is_obj(const HeapWord* addr) const /* override */;
  jlong millis_since_last_gc() /* override */;
  void prepare_for_verify() /* override */;
  void print_gc_threads_on(outputStream* st) const /* override */;
  void gc_threads_do(ThreadClosure* tcl) const /* override */;
  void print_tracing_info() const /* override */;
  void verify(VerifyOption vo) /* override */;
  bool supports_tlab_allocation() const /* override */;
  size_t tlab_capacity(Thread *thr) const /* override */;
  void object_iterate(ObjectClosure* cl) /* override */;
  void safe_object_iterate(ObjectClosure* cl) /* override */;
  size_t unsafe_max_tlab_alloc(Thread *thread) const /* override */;
  size_t max_tlab_size() const /* override */;
  void resize_all_tlabs() /* override */;
  void accumulate_statistics_all_gclabs() /* override */;
  HeapWord* tlab_post_allocation_setup(HeapWord* obj) /* override */;
  uint oop_extra_words() /* override */;
  size_t tlab_used(Thread* ignored) const /* override */;
  void stop() /* override */;

  WorkGang* get_safepoint_workers() { return _safepoint_workers; }

#ifndef CC_INTERP
  void compile_prepare_oop(MacroAssembler* masm, Register obj) /* override */;
#endif

  void register_nmethod(nmethod* nm);
  void unregister_nmethod(nmethod* nm);

  void pin_object(oop o) /* override */;
  void unpin_object(oop o) /* override */;

  // ---- Static accessors (heap singleton, addresses exported to
  // generated code) ----
  static ShenandoahHeap* heap();
  static ShenandoahHeap* heap_no_check();
  static size_t conservative_max_heap_alignment();
  static address in_cset_fast_test_addr();
  static address cancelled_concgc_addr();

  ShenandoahCollectorPolicy *shenandoahPolicy() const { return _shenandoah_policy; }
  ShenandoahPhaseTimings* phase_timings() const { return _phase_timings; }
  ShenandoahAllocTracker* alloc_tracker() const { return _alloc_tracker; }

  // ---- Region lookup and reference updating (bodies in the .inline.hpp) ----
  inline ShenandoahHeapRegion* heap_region_containing(const void* addr) const;
  inline size_t heap_region_index_containing(const void* addr) const;
  inline bool requires_marking(const void* entry) const;
  template <class T>
  inline oop maybe_update_oop_ref(T* p);

  template <class T>
  inline oop evac_update_oop_ref(T* p, bool& evac);

  void trash_cset_regions();

  // ---- Concurrent marking lifecycle ----
  void start_concurrent_marking();
  void stop_concurrent_marking();
  inline bool concurrent_mark_in_progress() const;
  static address concurrent_mark_in_progress_addr();

  void set_concurrent_partial_in_progress(bool in_progress);
  inline bool is_concurrent_partial_in_progress() const;

  // ---- Evacuation and update-refs phases ----
  void prepare_for_concurrent_evacuation();
  void evacuate_and_update_roots();
  // Fixup roots after concurrent cycle failed
  void fixup_roots();

  void update_heap_references(ShenandoahHeapRegionSet* regions, bool concurrent);
  void concurrent_update_heap_references();
  void prepare_update_refs();
  void finish_update_refs();

  void roots_iterate(OopClosure* cl);

private:
  void set_evacuation_in_progress(bool in_progress);

public:
  inline bool is_evacuation_in_progress() const;
  void set_evacuation_in_progress_concurrently(bool in_progress);
  void set_evacuation_in_progress_at_safepoint(bool in_progress);
  static address evacuation_in_progress_addr();

  void set_full_gc_in_progress(bool in_progress);
  bool is_full_gc_in_progress() const;

  void set_update_refs_in_progress(bool in_progress);
  bool is_update_refs_in_progress() const;
  static address update_refs_in_progress_addr();

  inline bool need_update_refs() const;
  void set_need_update_refs(bool update_refs);

  inline bool region_in_collection_set(size_t region_index) const;

  // Mainly there to avoid accidentally calling the templated
  // method below with ShenandoahHeapRegion* which would be *wrong*.
  inline bool in_collection_set(ShenandoahHeapRegion* r) const;

  template <class T>
  inline bool in_collection_set(T obj) const;

  // ---- Top-at-mark-start (TAMS) queries, keyed by region base address ----
  inline bool allocated_after_next_mark_start(HeapWord* addr) const;
  void set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr);
  HeapWord* next_top_at_mark_start(HeapWord* region_base);

  inline bool allocated_after_complete_mark_start(HeapWord* addr) const;
  void set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr);
  HeapWord* complete_top_at_mark_start(HeapWord* region_base);

  // Evacuates object src. Returns the evacuated object if this thread
  // succeeded, otherwise rolls back the evacuation and returns the
  // evacuated object by the competing thread. 'succeeded' is an out
  // param and set to true if this thread succeeded, otherwise to false.
  // NOTE(review): the out param is actually named 'evacuated' here, not
  // 'succeeded' -- the comment and signature should be reconciled.
  inline oop evacuate_object(oop src, Thread* thread, bool& evacuated);
  inline bool cancelled_concgc() const;
  inline bool check_cancelled_concgc_and_yield(bool sts_active = true);
  inline bool try_cancel_concgc();
  inline void clear_cancelled_concgc();

  ShenandoahHeapRegionSet* regions() const { return _ordered_regions;}
  ShenandoahFreeSet* free_regions() const { return _free_regions; }
  ShenandoahCollectionSet* collection_set() const { return _collection_set; }
  void clear_free_regions();
  void add_free_region(ShenandoahHeapRegion* r);

  ShenandoahConnectionMatrix* connection_matrix() const;

  // ---- Used/committed accounting ----
  void increase_used(size_t bytes);
  void decrease_used(size_t bytes);

  void set_used(size_t bytes);

  void increase_committed(size_t bytes);
  void decrease_committed(size_t bytes);

  void handle_heap_shrinkage();

  size_t garbage();

  // ---- Mark bitmap management ----
  void reset_next_mark_bitmap(WorkGang* gang);
  void reset_complete_mark_bitmap(WorkGang* gang);

  MarkBitMap* complete_mark_bit_map();
  MarkBitMap* next_mark_bit_map();
  inline bool is_marked_complete(oop obj) const;
  inline bool mark_next(oop obj) const;
  inline bool is_marked_next(oop obj) const;
  bool is_next_bitmap_clear();
  bool is_next_bitmap_clear_range(HeapWord* start, HeapWord* end);
  bool is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end);

  bool commit_bitmaps(ShenandoahHeapRegion* r);
  bool uncommit_bitmaps(ShenandoahHeapRegion* r);

  template <class T>
  inline oop update_oop_ref_not_null(T* p, oop obj);

  template <class T>
  inline oop maybe_update_oop_ref_not_null(T* p, oop obj);

  void print_heap_regions_on(outputStream* st) const;

  size_t bytes_allocated_since_cm();
  void set_bytes_allocated_since_cm(size_t bytes);

  size_t trash_humongous_region_at(ShenandoahHeapRegion *r);

  // ---- Component accessors ----
  ShenandoahMonitoringSupport* monitoring_support();
  ShenandoahConcurrentMark* concurrentMark() { return _scm;}
  ShenandoahPartialGC* partial_gc();
  ShenandoahVerifier* verifier();

  ReferenceProcessor* ref_processor() { return _ref_processor;}

  WorkGang* workers() const { return _workers;}

  uint max_workers();

  void assert_gc_workers(uint nworker) PRODUCT_RETURN;

  void do_evacuation();
  ShenandoahHeapRegion* next_compaction_region(const ShenandoahHeapRegion* r);

  void heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_cset_regions = false, bool skip_humongous_continuation = false) const;

  // Delete entries for dead interned string and clean up unreferenced symbols
  // in symbol table, possibly in parallel.
  void unload_classes_and_cleanup_tables(bool full_gc);

  inline size_t num_regions() const { return _num_regions; }

  BoolObjectClosure* is_alive_closure();

private:
  // Limited-range variants used by the public iterators below.
  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);

  template<class T>
  inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);

public:
  // ---- Iteration over marked objects in a region ----
  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);

  template<class T>
  inline void marked_object_safe_iterate(ShenandoahHeapRegion* region, T* cl);

  template<class T>
  inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl);

  template<class T>
  inline void marked_object_oop_safe_iterate(ShenandoahHeapRegion* region, T* cl);

  GCTimer* gc_timer() const;

  void swap_mark_bitmaps();

  void cancel_concgc(GCCause::Cause cause);
  void cancel_concgc(ShenandoahCancelCause cause);

  // ---- Heap lock ----
  ShenandoahHeapLock* lock() { return &_lock; }
  void assert_heaplock_owned_by_current_thread() PRODUCT_RETURN;
  void assert_heaplock_not_owned_by_current_thread() PRODUCT_RETURN;
  void assert_heaplock_or_safepoint() PRODUCT_RETURN;

public:
  // Classifies an allocation request for accounting/tracing.
  typedef enum {
    _alloc_shared,      // Allocate common, outside of TLAB
    _alloc_shared_gc,   // Allocate common, outside of GCLAB
    _alloc_tlab,        // Allocate TLAB
    _alloc_gclab,       // Allocate GCLAB
    _ALLOC_LIMIT,
  } AllocType;

  static const char* alloc_type_to_string(AllocType type) {
    switch (type) {
      case _alloc_shared:
        return "Shared";
      case _alloc_shared_gc:
        return "Shared GC";
      case _alloc_tlab:
        return "TLAB";
      case _alloc_gclab:
        return "GCLAB";
      default:
        ShouldNotReachHere();
        return "";
    }
  }
private:
  // ---- Allocation internals ----
  HeapWord* allocate_new_lab(size_t word_size, AllocType type);
  HeapWord* allocate_memory_under_lock(size_t word_size, AllocType type, bool &new_region);
  HeapWord* allocate_memory(size_t word_size, AllocType type);
  // Shenandoah functionality.
  inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);
  HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
  HeapWord* allocate_new_gclab(size_t word_size);

  template<class T>
  inline void do_marked_object(MarkBitMap* bitmap, T* cl, oop obj);

  ShenandoahConcurrentThread* concurrent_thread() { return _concurrent_gc_thread; }

  inline bool mark_next_no_checks(oop obj) const;

public:
  // CAS on a heap reference slot; bodies in the .inline.hpp.
  inline oop atomic_compare_exchange_oop(oop n, narrowOop* addr, oop c);
  inline oop atomic_compare_exchange_oop(oop n, oop* addr, oop c);

private:
  void ref_processing_init();

  GCTracer* tracer();

  void set_concurrent_mark_in_progress(bool in_progress);

  void oom_during_evacuation();

  HeapWord* allocate_large_memory(size_t word_size);

  const char* cancel_cause_to_string(ShenandoahCancelCause cause);

private:
  // Scratch array used by the trash-recycling paths below.
  size_t* _recycled_regions;

  // Allocation-sequence and usage snapshots taken at GC start/end; read
  // by the heuristics via the accessors below.
  uint64_t _alloc_seq_at_last_gc_start;
  uint64_t _alloc_seq_at_last_gc_end;
  size_t _used_at_last_gc;

public:
  void recycle_trash_assist(size_t limit);
  void recycle_trash();

  uint64_t alloc_seq_at_last_gc_end() const { return _alloc_seq_at_last_gc_end; }
  uint64_t alloc_seq_at_last_gc_start() const { return _alloc_seq_at_last_gc_start;}
  size_t used_at_last_gc() const { return _used_at_last_gc;}

  void set_alloc_seq_gc_start();
  void set_alloc_seq_gc_end();

  void set_used_at_last_gc() {_used_at_last_gc = used();}
};

#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP