/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP

#include "gc/shared/cmBitMap.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"

class ConcurrentGCTimer;

// Forward declarations: Shenandoah components referenced below only by
// pointer, so their full definitions are not needed in this header.
class ShenandoahCollectorPolicy;
class ShenandoahConnectionMatrix;
class ShenandoahHeapRegion;
class ShenandoahHeapRegionClosure;
class ShenandoahHeapRegionSet;
class ShenandoahCollectionSet;
class ShenandoahFreeSet;
class ShenandoahConcurrentMark;
class ShenandoahConcurrentThread;
class ShenandoahMonitoringSupport;

// OopClosure that updates the references it visits. The actual update logic
// lives in the inline definition of do_oop_work() (not visible in this
// header); presumably it rewrites each reference to the forwarded copy of
// its target — confirm against the .inline.hpp implementation.
class SCMUpdateRefsClosure: public OopClosure {
private:
  ShenandoahHeap* _heap;

  // Shared implementation for both the narrow and wide oop entry points.
  template <class T>
  inline void do_oop_work(T* p);

public:
  SCMUpdateRefsClosure();

public:
  inline void do_oop(oop* p);
  inline void do_oop(narrowOop* p);
};

#ifdef ASSERT
// Debug-only closure. By its name it verifies that visited references point
// into to-space (i.e. are not stale from-space pointers) — implementation is
// not visible here; confirm against the .cpp.
class AssertToSpaceClosure : public OopClosure {
private:
  template <class T>
  void do_oop_nv(T* p);
public:
  void do_oop(narrowOop* p);
  void do_oop(oop* p);
};
#endif

// BoolObjectClosure that considers every object alive. Used where an
// is-alive filter is required by an API but no filtering is wanted.
class ShenandoahAlwaysTrueClosure : public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};


// Liveness predicate for objects that may have been forwarded (evacuated).
// Declaration only; the liveness test itself is defined out of line.
// init() must be called with the heap before use.
class ShenandoahForwardedIsAliveClosure: public BoolObjectClosure {

private:
  ShenandoahHeap* _heap;
public:
  ShenandoahForwardedIsAliveClosure();
  void init(ShenandoahHeap* heap);
  bool do_object_b(oop obj);
};


// // A "ShenandoahHeap" is an implementation of a java heap for HotSpot.
// // It uses a new pauseless GC algorithm based on Brooks pointers.
// // Derived from G1

// //
// // CollectedHeap
// //    SharedHeap
// //      ShenandoahHeap

// ShenandoahHeap: the CollectedHeap implementation for the Shenandoah
// collector. Mostly declarations; only the heap lock, kind() and a few
// trivial accessors are defined inline in this header.
class ShenandoahHeap : public CollectedHeap {
  enum LockState { unlocked = 0, locked = 1 };

public:
  // Scoped spin lock over the heap's _heap_lock word (RAII: acquired in the
  // constructor, released in the destructor).
  class ShenandoahHeapLock : public StackObj {
  private:
    ShenandoahHeap* _heap;

  public:
    ShenandoahHeapLock(ShenandoahHeap* heap) : _heap(heap) {
      // Spin until the lock word can be flipped unlocked -> locked. The
      // load_acquire fast-path check avoids hammering cmpxchg while the
      // lock is held by another thread.
      while (OrderAccess::load_acquire(& _heap->_heap_lock) == locked || Atomic::cmpxchg(locked, &_heap->_heap_lock, unlocked) == locked) {
        SpinPause();
      }
      assert(_heap->_heap_lock == locked, "sanity");

#ifdef ASSERT
      // Record ownership so assert_heaplock_owned_by_current_thread() works.
      assert(_heap->_heap_lock_owner == NULL, "must not be owned");
      _heap->_heap_lock_owner = Thread::current();
#endif
    }

    ~ShenandoahHeapLock() {
#ifdef ASSERT
      _heap->assert_heaplock_owned_by_current_thread();
      _heap->_heap_lock_owner = NULL;
#endif
      // Release-store with fence publishes everything done under the lock
      // before other threads can observe it as unlocked.
      OrderAccess::release_store_fence(&_heap->_heap_lock, unlocked);
    }

  };

public:
  // Reasons a concurrent GC cycle can be cancelled (see cancel_concgc()).
  enum ShenandoahCancelCause {
    _oom_evacuation,
    _vm_stop,
  };
private:

  static ShenandoahHeap* _pgc;                        // Singleton instance; see heap()/heap_no_check().
  ShenandoahCollectorPolicy* _shenandoah_policy;
  VirtualSpace _storage;                              // Backing virtual memory for the heap.
  ShenandoahHeapRegion* _first_region;
  HeapWord* _first_region_bottom;

  // Sortable array of regions
  ShenandoahHeapRegionSet* _ordered_regions;
  ShenandoahHeapRegionSet* _sorted_regions;
  ShenandoahFreeSet* _free_regions;
  ShenandoahCollectionSet* _collection_set;

  ShenandoahHeapRegion* _currentAllocationRegion;
  ShenandoahConcurrentMark* _scm;                     // Concurrent marker; see concurrentMark().



  ShenandoahConcurrentThread* _concurrent_gc_thread;

  ShenandoahMonitoringSupport* _monitoring_support;

  size_t _num_regions;                                // Regions currently committed.
  size_t _max_regions;
  size_t _initialSize;
#ifndef NDEBUG
  uint _numAllocs;
#endif
  uint _max_workers;

  ShenandoahWorkGang* _workers;


  volatile size_t _used;                              // Bytes in use; see increase_used()/decrease_used().

  // Two marking bitmaps, swapped between cycles (see swap_mark_bitmaps()):
  // the pointers below select which one is "complete" (previous marking)
  // and which is "next" (in-progress marking).
  CMBitMap _mark_bit_map0;
  CMBitMap _mark_bit_map1;
  CMBitMap* _complete_mark_bit_map;
  CMBitMap* _next_mark_bit_map;

  // Fast collection-set membership test, presumably one flag per region
  // indexed from a biased base (G1-style) — see in_cset_fast_test_addr()
  // and clear_cset_fast_test(); confirm layout against the .cpp.
  bool* _in_cset_fast_test;
  bool* _in_cset_fast_test_base;
  size_t _in_cset_fast_test_length;

  // Per-region top-at-mark-start pointers for the complete and next
  // markings; used by allocated_after_*_mark_start() below.
  HeapWord** _complete_top_at_mark_starts;
  HeapWord** _complete_top_at_mark_starts_base;

  HeapWord** _next_top_at_mark_starts;
  HeapWord** _next_top_at_mark_starts_base;

  volatile jbyte _cancelled_concgc;                   // Set when concurrent GC is cancelled; see cancelled_concgc_addr().

  size_t _bytes_allocated_since_cm;
  size_t _bytes_allocated_during_cm;
  size_t _bytes_allocated_during_cm_start;
  size_t _max_allocated_gc;
  size_t _allocated_last_gc;
  size_t _used_start_gc;

  unsigned int _concurrent_mark_in_progress;          // Phase flag; address exposed via concurrent_mark_in_progress_addr().

  bool _full_gc_in_progress;

  unsigned int _evacuation_in_progress;
  bool _need_update_refs;
  bool _need_reset_bitmaps;

  ReferenceProcessor* _ref_processor;

  ShenandoahForwardedIsAliveClosure isAlive;

  ConcurrentGCTimer* _gc_timer;

  // See allocate_memory()
  volatile jbyte _heap_lock;

  ShenandoahConnectionMatrix* _connection_matrix;

#ifdef ASSERT
  volatile Thread* _heap_lock_owner;                  // Debug-only: thread currently holding _heap_lock.
#endif

public:
  ShenandoahHeap(ShenandoahCollectorPolicy* policy);

  HeapWord *first_region_bottom() { return _first_region_bottom; }

  // CollectedHeap interface (marked /* override */ in pre-C++11 style).
  const char* name() const /* override */;
  HeapWord* allocate_new_tlab(size_t word_size) /* override */;
  void print_on(outputStream* st) const /* override */;

  ShenandoahHeap::Name kind() const /* override */{
    return CollectedHeap::ShenandoahHeap;
  }

  jint initialize() /* override */;
  void post_initialize() /* override */;
  size_t capacity() const /* override */;
  size_t used() const /* override */;
  bool is_maximal_no_gc() const /* override */;
  size_t max_capacity() const /* override */;
  size_t min_capacity() const /* override */;
  bool is_in(const void* p) const /* override */;
  bool is_scavengable(const void* addr) /* override */;
  HeapWord* mem_allocate(size_t size, bool* what) /* override */;
  bool can_elide_tlab_store_barriers() const /* override */;
  oop new_store_pre_barrier(JavaThread* thread, oop new_obj) /* override */;
  bool can_elide_initializing_store_barrier(oop new_obj) /* override */;
  bool card_mark_must_follow_store() const /* override */;
  void collect(GCCause::Cause) /* override */;
  void do_full_collection(bool clear_all_soft_refs) /* override */;
  AdaptiveSizePolicy* size_policy() /* override */;
  CollectorPolicy* collector_policy() const /* override */;
  void ensure_parsability(bool retire_tlabs) /* override */;
  HeapWord* block_start(const void* addr) const /* override */;
  size_t block_size(const HeapWord* addr) const /* override */;
  bool block_is_obj(const HeapWord* addr) const /* override */;
  jlong millis_since_last_gc() /* override */;
  void prepare_for_verify() /* override */;
  void print_gc_threads_on(outputStream* st) const /* override */;
  void gc_threads_do(ThreadClosure* tcl) const /* override */;
  void print_tracing_info() const /* override */;
  void verify(VerifyOption vo) /* override */;
  bool supports_tlab_allocation() const /* override */;
  size_t tlab_capacity(Thread *thr) const /* override */;
  void object_iterate(ObjectClosure* cl) /* override */;
  void safe_object_iterate(ObjectClosure* cl) /* override */;
  size_t unsafe_max_tlab_alloc(Thread *thread) const /* override */;
  size_t max_tlab_size() const /* override */;
  void resize_all_tlabs() /* override */;
  void accumulate_statistics_all_gclabs() /* override */;
  HeapWord* tlab_post_allocation_setup(HeapWord* obj) /* override */;
  uint oop_extra_words() /* override */;
  size_t tlab_used(Thread* ignored) const /* override */;
  void stop() /* override */;

#ifndef CC_INTERP
  void compile_prepare_oop(MacroAssembler* masm, Register obj) /* override */;
#endif

  void register_nmethod(nmethod* nm);
  void unregister_nmethod(nmethod* nm);

  void pin_object(oop o) /* override */;
  void unpin_object(oop o) /* override */;

  // Singleton access (backed by _pgc) and addresses exported for use by
  // generated code.
  static ShenandoahHeap* heap();
  static ShenandoahHeap* heap_no_check();
  static size_t conservative_max_heap_alignment();
  static address in_cset_fast_test_addr();
  static address cancelled_concgc_addr();

  ShenandoahCollectorPolicy *shenandoahPolicy() { return _shenandoah_policy;}

  inline ShenandoahHeapRegion* heap_region_containing(const void* addr) const;
  inline uint heap_region_index_containing(const void* addr) const;
  inline bool requires_marking(const void* entry) const;
  template <class T>
  inline oop maybe_update_oop_ref(T* p);

  void recycle_dirty_regions();

  // Concurrent marking phase control.
  void start_concurrent_marking();
  void stop_concurrent_marking();
  inline bool concurrent_mark_in_progress();
  static address concurrent_mark_in_progress_addr();

  void prepare_for_concurrent_evacuation();
  void evacuate_and_update_roots();

  void verify_matrix();
private:
  void set_evacuation_in_progress(bool in_progress);
public:
  inline bool is_evacuation_in_progress();
  void set_evacuation_in_progress_concurrently(bool in_progress);
  void set_evacuation_in_progress_at_safepoint(bool in_progress);

  void set_full_gc_in_progress(bool in_progress);
  bool is_full_gc_in_progress() const;

  inline bool need_update_refs() const;
  void set_need_update_refs(bool update_refs);

  inline bool region_in_collection_set(size_t region_index) const;

  void set_region_in_collection_set(size_t region_index, bool b);

  // Mainly there to avoid accidentally calling the templated
  // method below with ShenandoahHeapRegion* which would be *wrong*.
  inline bool in_collection_set(ShenandoahHeapRegion* r) const;

  template <class T>
  inline bool in_collection_set(T obj) const;

  void clear_cset_fast_test();

  // Objects above the region's top-at-mark-start were allocated after the
  // corresponding marking started and are treated accordingly by marking.
  inline bool allocated_after_next_mark_start(HeapWord* addr) const;
  void set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr);
  HeapWord* next_top_at_mark_start(HeapWord* region_base);

  inline bool allocated_after_complete_mark_start(HeapWord* addr) const;
  void set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr);
  HeapWord* complete_top_at_mark_start(HeapWord* region_base);

  inline oop evacuate_object(oop src, Thread* thread);
  inline bool cancelled_concgc() const;
  inline void set_cancelled_concgc(bool v);
  inline bool try_cancel_concgc() const;
  void clear_cancelled_concgc();

  ShenandoahHeapRegionSet* regions() { return _ordered_regions;}
  ShenandoahFreeSet* free_regions();
  void clear_free_regions();
  void add_free_region(ShenandoahHeapRegion* r);

  ShenandoahConnectionMatrix* connection_matrix();

  // Accounting for _used.
  void increase_used(size_t bytes);
  void decrease_used(size_t bytes);

  void set_used(size_t bytes);
  size_t calculateUsed();

  size_t garbage();

  void reset_next_mark_bitmap(WorkGang* gang);
  void reset_complete_mark_bitmap(WorkGang* gang);

  CMBitMap* complete_mark_bit_map();
  CMBitMap* next_mark_bit_map();
  inline bool is_marked_complete(oop obj) const;
  inline bool mark_next(oop obj) const;
  inline bool is_marked_next(oop obj) const;
  bool is_next_bitmap_clear();
  bool is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end);

  void parallel_evacuate_region(ShenandoahHeapRegion* from_region);

  template <class T>
  inline oop update_oop_ref_not_null(T* p, oop obj);

  template <class T>
  inline oop maybe_update_oop_ref_not_null(T* p, oop obj);

  // Debug printing helpers.
  void print_heap_regions(outputStream* st = tty) const;
  void print_all_refs(const char* prefix);
  void print_heap_locations(HeapWord* start, HeapWord* end);

  void calculate_matrix(int* connections);
  void print_matrix(int* connections);

  size_t bytes_allocated_since_cm();
  void set_bytes_allocated_since_cm(size_t bytes);

  size_t max_allocated_gc();

  void reclaim_humongous_region_at(ShenandoahHeapRegion* r);

  VirtualSpace* storage() const;

  ShenandoahMonitoringSupport* monitoring_support();
  ShenandoahConcurrentMark* concurrentMark() { return _scm;}

  ReferenceProcessor* ref_processor() { return _ref_processor;}

  WorkGang* workers() const { return _workers;}

  uint max_workers();

  void do_evacuation();
  ShenandoahHeapRegion* next_compaction_region(const ShenandoahHeapRegion* r);

  void heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions = false, bool skip_humongous_continuation = false) const;

  void verify_heap_after_evacuation();

  // Delete entries for dead interned string and clean up unreferenced symbols
  // in symbol table, possibly in parallel.
  void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);

  size_t num_regions();
  size_t max_regions();

  // TODO: consider moving this into ShenandoahHeapRegion.
  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);

  GCTimer* gc_timer() const;

  void swap_mark_bitmaps();

  void cancel_concgc(GCCause::Cause cause);
  void cancel_concgc(ShenandoahCancelCause cause);

  // No-ops in product builds (PRODUCT_RETURN).
  void assert_heaplock_owned_by_current_thread() PRODUCT_RETURN;
  void assert_heaplock_or_safepoint() PRODUCT_RETURN;

private:
  // Allocation paths (TLAB, GCLAB, shared); allocate_memory_under_lock()
  // presumably runs under ShenandoahHeapLock — confirm in the .cpp.
  HeapWord* allocate_new_tlab(size_t word_size, bool mark);
  HeapWord* allocate_memory_under_lock(size_t word_size);
  HeapWord* allocate_memory(size_t word_size, bool evacuating);
  // Shenandoah functionality.
  inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);
  HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
  HeapWord* allocate_new_gclab(size_t word_size);

  void roots_iterate(OopClosure* cl);

  template<class T>
  inline void do_marked_object(CMBitMap* bitmap, T* cl, oop obj);

  ShenandoahConcurrentThread* concurrent_thread() { return _concurrent_gc_thread; }

  inline bool mark_next_no_checks(oop obj) const;

  void parallel_evacuate();

  // CAS on a heap reference slot, for both narrow and wide oops.
  inline oop atomic_compare_exchange_oop(oop n, narrowOop* addr, oop c);
  inline oop atomic_compare_exchange_oop(oop n, oop* addr, oop c);

  void evacuate_region(ShenandoahHeapRegion* from_region, ShenandoahHeapRegion* to_region);

#ifdef ASSERT
  void verify_evacuated_region(ShenandoahHeapRegion* from_region);
#endif

  inline void copy_object(oop p, HeapWord* s, size_t words);
  void verify_copy(oop p, oop c);
  void verify_heap_size_consistency();
  void verify_heap_after_marking();
  void verify_heap_after_update_refs();
  void verify_regions_after_update_refs();

  void ref_processing_init();

  GCTracer* tracer();
  ShenandoahCollectionSet* collection_set() { return _collection_set; }

  bool call_from_write_barrier(bool evacuating);
  void grow_heap_by(size_t num_regions);
  void ensure_new_regions(size_t num_new_regions);

  void verify_evacuation(ShenandoahHeapRegion* from_region);
  void set_concurrent_mark_in_progress(bool in_progress);

  void oom_during_evacuation();

  void verify_live();
  void verify_liveness_after_concurrent_mark();

  HeapWord* allocate_memory_work(size_t word_size);
  HeapWord* allocate_large_memory(size_t word_size);

#ifdef ASSERT
  void set_from_region_protection(bool protect);
#endif

  const char* cancel_cause_to_string(ShenandoahCancelCause cause);

};

#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP