/*
 * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP
#define SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP

#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/generation.hpp"

class StrongRootsScope;
class SubTasksDone;
class WorkGang;

// A "GenCollectedHeap" is a CollectedHeap that uses generational
// collection. It has two generations, young and old.
class GenCollectedHeap : public CollectedHeap {
  friend class GenCollectorPolicy;
  friend class Generation;
  friend class DefNewGeneration;
  friend class TenuredGeneration;
  friend class ConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  friend class GenMarkSweep;
  friend class VM_GenCollectForAllocation;
  friend class VM_GenCollectFull;
  friend class VM_GenCollectFullConcurrent;
  friend class VM_GC_HeapInspection;
  friend class VM_HeapDumper;
  friend class HeapInspection;
  friend class GCCauseSetter;
  friend class VMStructs;
public:
  friend class VM_PopulateDumpSharedSpace;

  enum GenerationType {
    YoungGen,
    OldGen
  };

private:
  Generation* _young_gen;
  Generation* _old_gen;

  // The singleton CardTable Remembered Set.
  CardTableRS* _rem_set;

  // The generational collector policy.
  GenCollectorPolicy* _gen_policy;

  // Indicates that the most recent previous incremental collection failed.
  // The flag is cleared when an action is taken that might clear the
  // condition that caused that incremental collection to fail.
  bool _incremental_collection_failed;

  // In support of ExplicitGCInvokesConcurrent functionality
  unsigned int _full_collections_completed;

  // Data structure for claiming the (potentially) parallel tasks in
  // (gen-specific) roots processing.
  SubTasksDone* _process_strong_tasks;
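
  // Illustrative sketch (hypothetical helper, not part of this class): the
  // GenerationType enum above selects between the two generations, e.g.
  //
  //   static Generation* gen_for(GenCollectedHeap* gch,
  //                              GenCollectedHeap::GenerationType t) {
  //     return (t == GenCollectedHeap::YoungGen) ? gch->young_gen()
  //                                              : gch->old_gen();
  //   }
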
  // Collects the given generation.
  void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
                          bool run_verification, bool clear_soft_refs,
                          bool restore_marks_for_biased_locking);

  // In block contents verification, the number of header words to skip
  NOT_PRODUCT(static size_t _skip_header_HeapWords;)

  WorkGang* _workers;

protected:
  // Helper functions for allocation
  HeapWord* attempt_allocation(size_t size,
                               bool   is_tlab,
                               bool   first_only);

  // Helper function for two callbacks below.
  // Considers collection of generations up to and including max_generation.
  void do_collection(bool           full,
                     bool           clear_all_soft_refs,
                     size_t         size,
                     bool           is_tlab,
                     GenerationType max_generation);

  // Callback from VM_GenCollectForAllocation operation.
  // This function does everything necessary/possible to satisfy an
  // allocation request that failed in the youngest generation that should
  // have handled it (including collection, expansion, etc.)
  HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);

  // Callback from VM_GenCollectFull operation.
  // Perform a full collection of generations up to and including max_generation.
  virtual void do_full_collection(bool clear_all_soft_refs);
  void do_full_collection(bool clear_all_soft_refs, GenerationType max_generation);

  // Does the "cause" of GC indicate that
  // we absolutely __must__ clear soft refs?
  bool must_clear_all_soft_refs();

public:
  GenCollectedHeap(GenCollectorPolicy *policy);

  WorkGang* workers() const { return _workers; }

  // Returns JNI_OK on success
  virtual jint initialize();

  // Reserve aligned space for the heap as needed by the contained generations.
  char* allocate(size_t alignment, ReservedSpace* heap_rs);

  // Performs operations required after initialization has been done.
  void post_initialize();

  // Initialize ("weak") refs processing support
  virtual void ref_processing_init();

  virtual Name kind() const {
    return CollectedHeap::GenCollectedHeap;
  }

  virtual const char* name() const {
    if (UseConcMarkSweepGC) {
      return "Concurrent Mark Sweep";
    } else {
      return "Serial";
    }
  }

  Generation* young_gen() const { return _young_gen; }
  Generation* old_gen()   const { return _old_gen; }

  bool is_young_gen(const Generation* gen) const { return gen == _young_gen; }
  bool is_old_gen(const Generation* gen) const { return gen == _old_gen; }

  // The generational collector policy.
  GenCollectorPolicy* gen_policy() const { return _gen_policy; }

  virtual CollectorPolicy* collector_policy() const { return gen_policy(); }

  // Adaptive size policy
  virtual AdaptiveSizePolicy* size_policy() {
    return gen_policy()->size_policy();
  }

  // Return the (conservative) maximum heap alignment
  static size_t conservative_max_heap_alignment() {
    return Generation::GenGrain;
  }

  size_t capacity() const;
  size_t used() const;

  // Save the "used_region" for both generations.
  void save_used_regions();

  size_t max_capacity() const;

  HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
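
  // Illustrative sketch (hypothetical diagnostic snippet, not part of this
  // class): the capacity()/used() accessors above, combined with the
  // per-generation accessors, support simple occupancy reporting, e.g.
  //
  //   GenCollectedHeap* gch = GenCollectedHeap::heap();
  //   tty->print_cr("heap used: " SIZE_FORMAT "K of " SIZE_FORMAT "K",
  //                 gch->used() / K, gch->capacity() / K);
  //   tty->print_cr("young used: " SIZE_FORMAT "K",
  //                 gch->young_gen()->used() / K);
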
  // We may support a shared contiguous allocation area, if the youngest
  // generation does.
  bool supports_inline_contig_alloc() const;
  HeapWord* volatile* top_addr() const;
  HeapWord** end_addr() const;

  // Perform a full collection of the heap; intended for use in implementing
  // "System.gc". This implies as full a collection as the CollectedHeap
  // supports. Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause);

  // The same as above but assume that the caller holds the Heap_lock.
  void collect_locked(GCCause::Cause cause);

  // Perform a full collection of generations up to and including max_generation.
  // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause, GenerationType max_generation);

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  // The methods is_in(), is_in_closed_subset() and is_in_youngest() may
  // be expensive to compute in general, so, to prevent
  // their inadvertent use in product JVMs, we restrict their use to
  // assertion checking or verification only.
  bool is_in(const void* p) const;

  // Override.
  bool is_in_closed_subset(const void* p) const {
    if (UseConcMarkSweepGC) {
      return is_in_reserved(p);
    } else {
      return is_in(p);
    }
  }

  // Returns true if the reference is to an object in the reserved space
  // for the young generation.
  // Assumes the young gen address range is less than that of the old gen.
  bool is_in_young(oop p);

#ifdef ASSERT
  bool is_in_partial_collection(const void* p);
#endif

  virtual bool is_scavengable(const void* addr) {
    return is_in_young((oop)addr);
  }

  // Iteration functions.
  void oop_iterate_no_header(OopClosure* cl);
  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl);
  Space* space_containing(const void* addr) const;

  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block. The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block. (Blocks may be of different sizes.) Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr". We say "blocks" instead of "objects" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap. Assumes (and verifies in non-product
  // builds) that addr is in the allocated part of the heap and is
  // the start of a chunk.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object. Assumes (and verifies in non-product
  // builds) that addr is in the allocated part of the heap and is
  // the start of a chunk.
  virtual bool block_is_obj(const HeapWord* addr) const;
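
  // Illustrative sketch of the block contract above (simplified; "bottom" and
  // "top" stand for the bounds of some parsable region of this heap, and
  // "heap" for a GenCollectedHeap*):
  //
  //   HeapWord* cur = bottom;
  //   while (cur < top) {
  //     size_t sz = heap->block_size(cur);  // every block has a findable size
  //     if (heap->block_is_obj(cur)) {
  //       // cur is the start of a Java object
  //     }
  //     cur += sz;                          // advance to the next block
  //   }
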
  // Section on TLABs.
  virtual bool supports_tlab_allocation() const;
  virtual size_t tlab_capacity(Thread* thr) const;
  virtual size_t tlab_used(Thread* thr) const;
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
  virtual HeapWord* allocate_new_tlab(size_t size);

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space. The
  // requestor promises to allocate no more than "max_alloc_words" in any
  // older generation (via promotion, say). Any blocks of space that can
  // be provided are returned as a list of ScratchBlocks, sorted by
  // decreasing size.
  ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
  // Allow each generation to reset any scratch space that it has
  // contributed as it needs.
  void release_scratch();

  // Ensure parsability: override
  virtual void ensure_parsability(bool retire_tlabs);

  // Time in ms since the longest time a collector ran in
  // any generation.
  virtual jlong millis_since_last_gc();

  // Total number of full collections completed.
  unsigned int total_full_collections_completed() {
    assert(_full_collections_completed <= _total_full_collections,
           "Can't complete more collections than were started");
    return _full_collections_completed;
  }

  // Update above counter, as appropriate, at the end of a stop-world GC cycle
  unsigned int update_full_collections_completed();
  // Update above counter, as appropriate, at the end of a concurrent GC cycle
  unsigned int update_full_collections_completed(unsigned int count);

  // Update "time of last gc" for all generations to "now".
  void update_time_of_last_gc(jlong now) {
    _young_gen->update_time_of_last_gc(now);
    _old_gen->update_time_of_last_gc(now);
  }

  // Update the gc statistics for each generation.
  void update_gc_stats(Generation* current_generation, bool full) {
    _old_gen->update_gc_stats(current_generation, full);
  }

  bool no_gc_in_progress() { return !is_gc_active(); }

  // Override.
  void prepare_for_verify();

  // Override.
  void verify(VerifyOption option);

  // Override.
  virtual void print_on(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;
  virtual void print_on_error(outputStream* st) const;

  void print_heap_change(size_t young_prev_used, size_t old_prev_used) const;

  // The functions below are helper functions that a subclass of
  // "CollectedHeap" can use in the implementation of its virtual
  // functions.

  class GenClosure : public StackObj {
  public:
    virtual void do_generation(Generation* gen) = 0;
  };

  // Apply "cl.do_generation" to all generations in the heap;
  // "old_to_young" determines the order.
  void generation_iterate(GenClosure* cl, bool old_to_young);

  // Return "true" if all generations have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // This function returns the CardTableRS object that allows us to scan
  // generations in a fully generational heap.
  CardTableRS* rem_set() { return _rem_set; }
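
  // Illustrative sketch (hypothetical closure, not part of HotSpot): applying
  // a GenClosure to both generations via generation_iterate(), e.g.
  //
  //   class PrintGenClosure : public GenCollectedHeap::GenClosure {
  //   public:
  //     virtual void do_generation(Generation* gen) { gen->print_on(tty); }
  //   };
  //
  //   PrintGenClosure cl;
  //   GenCollectedHeap::heap()->generation_iterate(&cl, true /* old_to_young */);
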
  // Convenience function to be used in situations where the heap type can be
  // asserted to be this type.
  static GenCollectedHeap* heap();

  // The ScanningOption determines which of the roots
  // the closure is applied to:
  // "SO_None" does none;
  enum ScanningOption {
    SO_None              = 0x0,
    SO_AllCodeCache      = 0x8,
    SO_ScavengeCodeCache = 0x10
  };

private:
  void process_roots(StrongRootsScope* scope,
                     ScanningOption so,
                     OopClosure* strong_roots,
                     OopClosure* weak_roots,
                     CLDClosure* strong_cld_closure,
                     CLDClosure* weak_cld_closure,
                     CodeBlobToOopClosure* code_roots);

  void process_string_table_roots(StrongRootsScope* scope,
                                  OopClosure* root_closure);

public:
  void young_process_roots(StrongRootsScope* scope,
                           OopsInGenClosure* root_closure,
                           OopsInGenClosure* old_gen_closure,
                           CLDClosure* cld_closure);

  // If "young_gen_as_roots" is false, younger generations are
  // not scanned as roots; in this case, the caller must be arranging to
  // scan the younger generations itself. (For example, a generation might
  // explicitly mark reachable objects in younger generations, to avoid
  // excess storage retention.)
  void cms_process_roots(StrongRootsScope* scope,
                         bool young_gen_as_roots,
                         ScanningOption so,
                         bool only_strong_roots,
                         OopsInGenClosure* root_closure,
                         CLDClosure* cld_closure);

  void full_process_roots(StrongRootsScope* scope,
                          bool is_adjust_phase,
                          ScanningOption so,
                          bool only_strong_roots,
                          OopsInGenClosure* root_closure,
                          CLDClosure* cld_closure);

  // Apply "root_closure" to all the weak roots of the system.
  // These include JNI weak roots, string table,
  // and referents of reachable weak refs.
  void gen_process_weak_roots(OopClosure* root_closure);

  // Set the saved marks of generations, if that makes sense.
  // In particular, if any generation might iterate over the oops
  // in other generations, it should call this method.
  void save_marks();

  // Apply "cur->do_oop" or "older->do_oop" to all the oops in objects
  // allocated since the last call to save_marks in generations at or above
  // "start_gen". The "cur" closure is applied to references in the
  // generation at "start_gen", and the "older" closure to older generations.
#define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix)    \
  void oop_since_save_marks_iterate(GenerationType start_gen,           \
                                    OopClosureType* cur,                \
                                    OopClosureType* older);

  ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL

  // Returns "true" iff no allocations have occurred since the last
  // call to "save_marks".
  bool no_allocs_since_save_marks();

  // Returns true if an incremental collection is likely to fail.
  // We optionally consult the young gen, if asked to do so;
  // otherwise we base our answer on whether the previous incremental
  // collection attempt failed with no corrective action as of yet.
  bool incremental_collection_will_fail(bool consult_young) {
    // The first disjunct remembers if an incremental collection failed, even
    // when we thought (second disjunct) that it would not.
    return incremental_collection_failed() ||
           (consult_young && !_young_gen->collection_attempt_is_safe());
  }
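
  // Simplified sketch of the failure-flag protocol below (illustrative only,
  // not the actual HotSpot collector code): a young collection that cannot
  // guarantee promotion success sets the flag, and a later caller consults it
  // to escalate to a full collection:
  //
  //   // in the young collector:
  //   if (!collection_attempt_is_safe()) {
  //     gch->set_incremental_collection_failed();  // bail out of the attempt
  //     return;
  //   }
  //   // in the caller deciding what to collect:
  //   bool do_full = gch->incremental_collection_will_fail(false /* don't consult */);
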
  // If a generation bails out of an incremental collection,
  // it sets this flag.
  bool incremental_collection_failed() const {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // Promotion of obj into gen failed. Try to promote obj to higher
  // gens in ascending order; return the new location of obj if successful.
  // Otherwise, try expand-and-allocate for obj in both the young and old
  // generation; return the new location of obj if successful. Otherwise, return NULL.
  oop handle_failed_promotion(Generation* old_gen,
                              oop obj,
                              size_t obj_size);

private:
  // Accessor for memory state verification support
  NOT_PRODUCT(
    static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
  )

  // Override
  void check_for_non_bad_heap_word_value(HeapWord* addr,
                                         size_t size) PRODUCT_RETURN;

  // For use by mark-sweep. As implemented, mark-sweep-compact is global
  // in an essential way: compaction is performed across generations, by
  // iterating over spaces.
  void prepare_for_compaction();

  // Perform a full collection of the generations up to and including max_generation.
  // This is the low level interface used by the public versions of
  // collect() and collect_locked(). Caller holds the Heap_lock on entry.
  void collect_locked(GCCause::Cause cause, GenerationType max_generation);

  // Returns success or failure.
  bool create_cms_collector();

  // In support of ExplicitGCInvokesConcurrent functionality
  bool should_do_concurrent_full_gc(GCCause::Cause cause);
  void collect_mostly_concurrent(GCCause::Cause cause);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

protected:
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

public:
  void stop();
  void safepoint_synchronize_begin();
  void safepoint_synchronize_end();

  CardTableModRefBS* barrier_set() {
    return barrier_set_cast<CardTableModRefBS>(CollectedHeap::barrier_set());
  }
};

#endif // SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP