/*
 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP
#define SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP

#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/oopStorageParState.hpp"
#include "gc/shared/softRefGenPolicy.hpp"

class AdaptiveSizePolicy;
class GCPolicyCounters;
class GenerationSpec;
class StrongRootsScope;
class SubTasksDone;
class WorkGang;

// A "GenCollectedHeap" is a CollectedHeap that uses generational
// collection. It has two generations, young and old.
class GenCollectedHeap : public CollectedHeap {
  friend class GenCollectorPolicy;
  friend class Generation;
  friend class DefNewGeneration;
  friend class TenuredGeneration;
  friend class ConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  friend class GenMarkSweep;
  friend class VM_GenCollectForAllocation;
  friend class VM_GenCollectFull;
  friend class VM_GenCollectFullConcurrent;
  friend class VM_GC_HeapInspection;
  friend class VM_HeapDumper;
  friend class HeapInspection;
  friend class GCCauseSetter;
  friend class VMStructs;
public:
  friend class VM_PopulateDumpSharedSpace;

  enum GenerationType {
    YoungGen,
    OldGen
  };

protected:
  Generation* _young_gen;
  Generation* _old_gen;

private:
  GenerationSpec* _young_gen_spec;
  GenerationSpec* _old_gen_spec;

  // The singleton CardTable Remembered Set.
  CardTableRS* _rem_set;

  // The generational collector policy.
  GenCollectorPolicy* _gen_policy;

  SoftRefGenPolicy _soft_ref_gen_policy;

  // The sizing of the heap is controlled by a sizing policy.
  AdaptiveSizePolicy* _size_policy;

  GCPolicyCounters* _gc_policy_counters;

  // Indicates that the most recent previous incremental collection failed.
  // The flag is cleared when an action is taken that might clear the
  // condition that caused that incremental collection to fail.
  bool _incremental_collection_failed;

  // In support of ExplicitGCInvokesConcurrent functionality
  unsigned int _full_collections_completed;

  // Collects the given generation.
  void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
                          bool run_verification, bool clear_soft_refs,
                          bool restore_marks_for_biased_locking);

  // Reserve aligned space for the heap as needed by the contained generations.
  char* allocate(size_t alignment, ReservedSpace* heap_rs);

  // Initialize ("weak") refs processing support.
  void ref_processing_init();

protected:

  // The set of potentially parallel tasks in root scanning.
  enum GCH_strong_roots_tasks {
    GCH_PS_Universe_oops_do,
    GCH_PS_JNIHandles_oops_do,
    GCH_PS_ObjectSynchronizer_oops_do,
    GCH_PS_FlatProfiler_oops_do,
    GCH_PS_Management_oops_do,
    GCH_PS_SystemDictionary_oops_do,
    GCH_PS_ClassLoaderDataGraph_oops_do,
    GCH_PS_jvmti_oops_do,
    GCH_PS_CodeCache_oops_do,
    GCH_PS_aot_oops_do,
    GCH_PS_younger_gens,
    // Leave this one last.
    GCH_PS_NumElements
  };

  // Data structure for claiming the (potentially) parallel tasks in
  // (gen-specific) roots processing.
  SubTasksDone* _process_strong_tasks;

  GCMemoryManager* _young_manager;
  GCMemoryManager* _old_manager;

  // Helper functions for allocation.
  HeapWord* attempt_allocation(size_t size,
                               bool is_tlab,
                               bool first_only);

  // Helper function for the two callbacks below.
  // Considers collection of generations up to and including max_generation.
  void do_collection(bool full,
                     bool clear_all_soft_refs,
                     size_t size,
                     bool is_tlab,
                     GenerationType max_generation);

  // Callback from the VM_GenCollectForAllocation operation.
  // This function does everything necessary/possible to satisfy an
  // allocation request that failed in the youngest generation that should
  // have handled it (including collection, expansion, etc.)
  HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);

  // Callback from the VM_GenCollectFull operation.
  // Perform a full collection of generations up to and including max_generation.
  virtual void do_full_collection(bool clear_all_soft_refs);
  void do_full_collection(bool clear_all_soft_refs, GenerationType max_generation);

  // Does the "cause" of GC indicate that
  // we absolutely __must__ clear soft refs?
  bool must_clear_all_soft_refs();

  GenCollectedHeap(GenCollectorPolicy *policy,
                   Generation::Name young,
                   Generation::Name old,
                   const char* policy_counters_name);

public:

  // Returns JNI_OK on success.
  virtual jint initialize();
  virtual CardTableRS* create_rem_set(const MemRegion& reserved_region);

  void initialize_size_policy(size_t init_eden_size,
                              size_t init_promo_size,
                              size_t init_survivor_size);

  // Performs operations required after initialization has been done.
  void post_initialize();

  Generation* young_gen() const { return _young_gen; }
  Generation* old_gen() const { return _old_gen; }

  bool is_young_gen(const Generation* gen) const { return gen == _young_gen; }
  bool is_old_gen(const Generation* gen) const { return gen == _old_gen; }

  GenerationSpec* young_gen_spec() const;
  GenerationSpec* old_gen_spec() const;
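
  // Illustrative usage sketch (not part of this interface): code that knows the
  // heap is generational typically retrieves the singleton via heap() (declared
  // further below) and queries the two generations directly, e.g.:
  //
  //   GenCollectedHeap* gch = GenCollectedHeap::heap();
  //   Generation* young = gch->young_gen();
  //   Generation* old   = gch->old_gen();
  //   assert(gch->is_young_gen(young) && gch->is_old_gen(old), "sanity");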
  // The generational collector policy.
  GenCollectorPolicy* gen_policy() const { return _gen_policy; }

  virtual CollectorPolicy* collector_policy() const { return gen_policy(); }

  virtual SoftRefPolicy* soft_ref_policy() { return &_soft_ref_gen_policy; }

  // Adaptive size policy.
  virtual AdaptiveSizePolicy* size_policy() {
    return _size_policy;
  }

  // Performance Counter support.
  GCPolicyCounters* counters() { return _gc_policy_counters; }

  // Return the (conservative) maximum heap alignment.
  static size_t conservative_max_heap_alignment() {
    return Generation::GenGrain;
  }

  size_t capacity() const;
  size_t used() const;

  // Save the "used_region" for both generations.
  void save_used_regions();

  size_t max_capacity() const;

  HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);

  // We may support a shared contiguous allocation area, if the youngest
  // generation does.
  bool supports_inline_contig_alloc() const;
  HeapWord* volatile* top_addr() const;
  HeapWord** end_addr() const;

  // Perform a full collection of the heap; intended for use in implementing
  // "System.gc". This implies as full a collection as the CollectedHeap
  // supports. Caller does not hold the Heap_lock on entry.
  virtual void collect(GCCause::Cause cause);

  // The same as above, but assume that the caller holds the Heap_lock.
  void collect_locked(GCCause::Cause cause);

  // Perform a full collection of generations up to and including max_generation.
  // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause, GenerationType max_generation);

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  // The methods is_in(), is_in_closed_subset() and is_in_youngest() may
  // be expensive to compute in general, so, to prevent
  // their inadvertent use in product JVMs, we restrict their use to
  // assertion checking or verification only.
  bool is_in(const void* p) const;

  // Returns true if the reference is to an object in the reserved space
  // for the young generation.
  // Assumes the young gen address range is less than that of the old gen.
  bool is_in_young(oop p);

#ifdef ASSERT
  bool is_in_partial_collection(const void* p);
#endif

  virtual bool is_scavengable(oop obj) {
    return is_in_young(obj);
  }

  // Optimized nmethod scanning support routines.
  virtual void register_nmethod(nmethod* nm);
  virtual void verify_nmethod(nmethod* nm);

  // Iteration functions.
  void oop_iterate_no_header(OopClosure* cl);
  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl);
  Space* space_containing(const void* addr) const;

  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block. The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block. (Blocks may be of different sizes.) Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.
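  //
  // Illustrative sketch (not part of this interface): the block API declared
  // below is what makes a linear walk over a parsable region possible.
  // Assuming "bottom" and "top" bound such a region:
  //
  //   HeapWord* cur = bottom;
  //   while (cur < top) {
  //     if (block_is_obj(cur)) {
  //       // "cur" is the start of a Java object of block_size(cur) words.
  //     }
  //     cur += block_size(cur);   // advance to the start of the next block
  //   }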

  // Returns the address of the start of the "block" that contains the
  // address "addr". We say "blocks" instead of "object" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap. Assumes (and verifies in non-product
  // builds) that addr is in the allocated part of the heap and is
  // the start of a chunk.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object. Assumes (and verifies in non-product
  // builds) that addr is in the allocated part of the heap and is
  // the start of a chunk.
  virtual bool block_is_obj(const HeapWord* addr) const;

  // Section on TLABs.
  virtual bool supports_tlab_allocation() const;
  virtual size_t tlab_capacity(Thread* thr) const;
  virtual size_t tlab_used(Thread* thr) const;
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
  virtual HeapWord* allocate_new_tlab(size_t min_size,
                                      size_t requested_size,
                                      size_t* actual_size);

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space. The
  // requestor promises to allocate no more than "max_alloc_words" in any
  // older generation (via promotion, say). Any blocks of space that can
  // be provided are returned as a list of ScratchBlocks, sorted by
  // decreasing size.
  ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
  // Allow each generation to reset any scratch space that it has
  // contributed as it needs.
  void release_scratch();
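
  // Illustrative sketch (assumed usage; the ScratchBlock layout with "next" and
  // "num_words" fields is declared in generation.hpp, not here): a collector
  // gathers scratch space up front, for some promotion budget "max_alloc_words",
  // and releases it once the collection is done:
  //
  //   ScratchBlock* blocks = gather_scratch(_young_gen, max_alloc_words);
  //   for (ScratchBlock* b = blocks; b != NULL; b = b->next) {
  //     // up to b->num_words words of scratch are available in this block
  //   }
  //   ... perform the collection ...
  //   release_scratch();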

  // Ensure parsability: override.
  virtual void ensure_parsability(bool retire_tlabs);

  // Returns the longest time (in ms) that has elapsed since any
  // generation was last collected.
  virtual jlong millis_since_last_gc();

  // Total number of full collections completed.
  unsigned int total_full_collections_completed() {
    assert(_full_collections_completed <= _total_full_collections,
           "Can't complete more collections than were started");
    return _full_collections_completed;
  }

  // Update above counter, as appropriate, at the end of a stop-world GC cycle.
  unsigned int update_full_collections_completed();
  // Update above counter, as appropriate, at the end of a concurrent GC cycle.
  unsigned int update_full_collections_completed(unsigned int count);

  // Update "time of last gc" for all generations to "now".
  void update_time_of_last_gc(jlong now) {
    _young_gen->update_time_of_last_gc(now);
    _old_gen->update_time_of_last_gc(now);
  }

  // Update the gc statistics for each generation.
  void update_gc_stats(Generation* current_generation, bool full) {
    _old_gen->update_gc_stats(current_generation, full);
  }

  bool no_gc_in_progress() { return !is_gc_active(); }

  // Override.
  void prepare_for_verify();

  // Override.
  void verify(VerifyOption option);

  // Override.
  virtual void print_on(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  void print_heap_change(size_t young_prev_used, size_t old_prev_used) const;

  // The functions below are helper functions that a subclass of
  // "CollectedHeap" can use in the implementation of its virtual
  // functions.

  class GenClosure : public StackObj {
   public:
    virtual void do_generation(Generation* gen) = 0;
  };

  // Apply "cl.do_generation" to all generations in the heap.
  // The "old_to_young" flag determines the iteration order.
  void generation_iterate(GenClosure* cl, bool old_to_young);
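
  // Illustrative sketch (not part of this interface): a minimal GenClosure that
  // visits both generations via generation_iterate(), youngest first. It assumes
  // Generation::print_on(outputStream*), as declared in generation.hpp:
  //
  //   class PrintGenClosure : public GenCollectedHeap::GenClosure {
  //    public:
  //     virtual void do_generation(Generation* gen) {
  //       gen->print_on(tty);   // print each generation to the tty stream
  //     }
  //   };
  //
  //   PrintGenClosure cl;
  //   GenCollectedHeap::heap()->generation_iterate(&cl, false /* old_to_young */);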

  // Return "true" if all generations have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // This function returns the CardTableRS object that allows us to scan
  // generations in a fully generational heap.
  CardTableRS* rem_set() { return _rem_set; }

  // Convenience function to be used in situations where the heap type can be
  // asserted to be this type.
  static GenCollectedHeap* heap();

  // The ScanningOption determines which of the code cache roots
  // the closure is applied to:
  // "SO_None" does none;
  // "SO_AllCodeCache" scans the entire code cache;
  // "SO_ScavengeCodeCache" scans only nmethods on the scavengable list.
  enum ScanningOption {
    SO_None              = 0x0,
    SO_AllCodeCache      = 0x8,
    SO_ScavengeCodeCache = 0x10
  };

protected:
  void process_roots(StrongRootsScope* scope,
                     ScanningOption so,
                     OopClosure* strong_roots,
                     CLDClosure* strong_cld_closure,
                     CLDClosure* weak_cld_closure,
                     CodeBlobToOopClosure* code_roots);

  void process_string_table_roots(StrongRootsScope* scope,
                                  OopClosure* root_closure,
                                  OopStorage::ParState<false, false>* par_state_string);

  // Accessor for memory state verification support.
  NOT_PRODUCT(
    virtual size_t skip_header_HeapWords() { return 0; }
  )

  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);

public:
  void young_process_roots(StrongRootsScope* scope,
                           OopsInGenClosure* root_closure,
                           OopsInGenClosure* old_gen_closure,
                           CLDClosure* cld_closure,
                           OopStorage::ParState<false, false>* par_state_string = NULL);

  void full_process_roots(StrongRootsScope* scope,
                          bool is_adjust_phase,
                          ScanningOption so,
                          bool only_strong_roots,
                          OopsInGenClosure* root_closure,
                          CLDClosure* cld_closure,
                          OopStorage::ParState<false, false>* par_state_string = NULL);

  // Apply "root_closure" to all the weak roots of the system.
  // These include JNI weak roots, the string table,
  // and referents of reachable weak refs.
  void gen_process_weak_roots(OopClosure* root_closure);

  // Set the saved marks of generations, if that makes sense.
  // In particular, if any generation might iterate over the oops
  // in other generations, it should call this method.
  void save_marks();

  // Returns "true" iff no allocations have occurred since the last
  // call to "save_marks".
  bool no_allocs_since_save_marks();

  // Returns true if an incremental collection is likely to fail.
  // We optionally consult the young gen, if asked to do so;
  // otherwise we base our answer on whether the previous incremental
  // collection attempt failed with no corrective action as of yet.
  bool incremental_collection_will_fail(bool consult_young) {
    // The first disjunct remembers if an incremental collection failed, even
    // when we thought (second disjunct) that it would not.
    return incremental_collection_failed() ||
           (consult_young && !_young_gen->collection_attempt_is_safe());
  }

  // If a generation bails out of an incremental collection,
  // it sets this flag.
  bool incremental_collection_failed() const {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // Promotion of obj into gen failed. Try to promote obj to higher
  // gens in ascending order; return the new location of obj if successful.
  // Otherwise, try expand-and-allocate for obj in both the young and old
  // generations; return the new location of obj if successful. Otherwise, return NULL.
  oop handle_failed_promotion(Generation* old_gen,
                              oop obj,
                              size_t obj_size);

private:
  // Return true if an allocation should be attempted in the older generation
  // if it fails in the younger generation. Return false otherwise.
  bool should_try_older_generation_allocation(size_t word_size) const;

  // Try to allocate space by expanding the heap.
  HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);

  HeapWord* mem_allocate_work(size_t size,
                              bool is_tlab,
                              bool* gc_overhead_limit_was_exceeded);

  // Override.
  void check_for_non_bad_heap_word_value(HeapWord* addr,
                                         size_t size) PRODUCT_RETURN;

#if INCLUDE_SERIALGC
  // For use by mark-sweep. As implemented, mark-sweep-compact is global
  // in an essential way: compaction is performed across generations, by
  // iterating over spaces.
  void prepare_for_compaction();
#endif

  // Perform a full collection of the generations up to and including max_generation.
  // This is the low-level interface used by the public versions of
  // collect() and collect_locked(). Caller holds the Heap_lock on entry.
  void collect_locked(GCCause::Cause cause, GenerationType max_generation);

  // Save the tops of the spaces in all generations.
  void record_gen_tops_before_GC() PRODUCT_RETURN;
};

#endif // SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP