/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP
#define SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP

#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/generation.hpp"

class StrongRootsScope;
class SubTasksDone;
class WorkGang;

// A "GenCollectedHeap" is a CollectedHeap that uses generational
// collection. It has two generations, young and old.
class GenCollectedHeap : public CollectedHeap {
  friend class GenCollectorPolicy;
  friend class Generation;
  friend class DefNewGeneration;
  friend class TenuredGeneration;
  friend class ConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  friend class GenMarkSweep;
  friend class VM_GenCollectForAllocation;
  friend class VM_GenCollectFull;
  friend class VM_GenCollectFullConcurrent;
  friend class VM_GC_HeapInspection;
  friend class VM_HeapDumper;
  friend class HeapInspection;
  friend class GCCauseSetter;
  friend class VMStructs;
public:
  friend class VM_PopulateDumpSharedSpace;

  enum GenerationType {
    YoungGen,
    OldGen
  };

private:
  Generation* _young_gen;
  Generation* _old_gen;

  // The singleton CardTable Remembered Set.
  CardTableRS* _rem_set;

  // The generational collector policy.
  GenCollectorPolicy* _gen_policy;

  // Indicates that the most recent previous incremental collection failed.
  // The flag is cleared when an action is taken that might clear the
  // condition that caused that incremental collection to fail.
  bool _incremental_collection_failed;

  // In support of ExplicitGCInvokesConcurrent functionality
  unsigned int _full_collections_completed;

  // Data structure for claiming the (potentially) parallel tasks in
  // (gen-specific) roots processing.
  SubTasksDone* _process_strong_tasks;
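  // Illustrative sketch (not part of this header): callers that are handed a
  // GenerationType typically map it back to one of the two generations above,
  // via the young_gen()/old_gen() accessors declared further below. The
  // helper name "generation_for" is hypothetical.
  //
  //   Generation* generation_for(GenCollectedHeap* gch,
  //                              GenCollectedHeap::GenerationType t) {
  //     return t == GenCollectedHeap::YoungGen ? gch->young_gen()
  //                                            : gch->old_gen();
  //   }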
  // Collects the given generation.
  void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
                          bool run_verification, bool clear_soft_refs,
                          bool restore_marks_for_biased_locking);

  // In block contents verification, the number of header words to skip
  NOT_PRODUCT(static size_t _skip_header_HeapWords;)

  WorkGang* _workers;

protected:
  // Helper functions for allocation
  HeapWord* attempt_allocation(size_t size,
                               bool is_tlab,
                               bool first_only);

  // Helper function for two callbacks below.
  // Considers collection of generations up to and including max_generation.
  void do_collection(bool full,
                     bool clear_all_soft_refs,
                     size_t size,
                     bool is_tlab,
                     GenerationType max_generation);

  // Callback from VM_GenCollectForAllocation operation.
  // This function does everything necessary/possible to satisfy an
  // allocation request that failed in the youngest generation that should
  // have handled it (including collection, expansion, etc.)
  HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);

  // Callback from VM_GenCollectFull operation.
  // Perform a full collection of generations up to and including max_generation.
  virtual void do_full_collection(bool clear_all_soft_refs);
  void do_full_collection(bool clear_all_soft_refs, GenerationType max_generation);

  // Does the "cause" of GC indicate that
  // we absolutely __must__ clear soft refs?
  bool must_clear_all_soft_refs();

public:
  GenCollectedHeap(GenCollectorPolicy *policy);

  WorkGang* workers() const { return _workers; }

  // Returns JNI_OK on success
  virtual jint initialize();

  // Reserve aligned space for the heap as needed by the contained generations.
  char* allocate(size_t alignment, ReservedSpace* heap_rs);

  // Performs operations required after initialization has been done.
  void post_initialize();

  // Initialize ("weak") refs processing support
  virtual void ref_processing_init();

  virtual Name kind() const {
    return CollectedHeap::GenCollectedHeap;
  }

  Generation* young_gen() const { return _young_gen; }
  Generation* old_gen()   const { return _old_gen; }

  bool is_young_gen(const Generation* gen) const { return gen == _young_gen; }
  bool is_old_gen(const Generation* gen) const { return gen == _old_gen; }

  // The generational collector policy.
  GenCollectorPolicy* gen_policy() const { return _gen_policy; }

  virtual CollectorPolicy* collector_policy() const { return gen_policy(); }

  // Adaptive size policy
  virtual AdaptiveSizePolicy* size_policy() {
    return gen_policy()->size_policy();
  }

  // Return the (conservative) maximum heap alignment
  static size_t conservative_max_heap_alignment() {
    return Generation::GenGrain;
  }

  size_t capacity() const;
  size_t used() const;

  // Save the "used_region" for both generations.
  void save_used_regions();

  size_t max_capacity() const;

  HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);

  // We may support a shared contiguous allocation area, if the youngest
  // generation does.
  bool supports_inline_contig_alloc() const;
  HeapWord** top_addr() const;
  HeapWord** end_addr() const;
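  // Illustrative sketch (an assumption, not code from this header): when
  // supports_inline_contig_alloc() is true, top_addr()/end_addr() expose the
  // shared contiguous area's bump pointer so compiled code can inline the
  // allocation fast path. "try_inline_alloc" is a hypothetical helper; the
  // real fast path updates *top atomically.
  //
  //   HeapWord* try_inline_alloc(GenCollectedHeap* gch, size_t word_size) {
  //     HeapWord* obj = *gch->top_addr();
  //     if (obj + word_size <= *gch->end_addr()) {
  //       *gch->top_addr() = obj + word_size;  // bump the allocation top
  //       return obj;                          // caller initializes the object
  //     }
  //     return NULL;  // no room; fall back to mem_allocate()
  //   }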
  // Perform a full collection of the heap; intended for use in implementing
  // "System.gc". This implies as full a collection as the CollectedHeap
  // supports. Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause);

  // The same as above but assume that the caller holds the Heap_lock.
  void collect_locked(GCCause::Cause cause);

  // Perform a full collection of generations up to and including max_generation.
  // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause, GenerationType max_generation);

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  // The methods is_in(), is_in_closed_subset() and is_in_young() may
  // be expensive to compute in general, so, to prevent
  // their inadvertent use in product jvm's, we restrict their use to
  // assertion checking or verification only.
  bool is_in(const void* p) const;

  // override
  bool is_in_closed_subset(const void* p) const {
    if (UseConcMarkSweepGC) {
      return is_in_reserved(p);
    } else {
      return is_in(p);
    }
  }

  // Returns true if the reference is to an object in the reserved space
  // for the young generation.
  // Assumes the young gen address range is less than that of the old gen.
  bool is_in_young(oop p);

#ifdef ASSERT
  bool is_in_partial_collection(const void* p);
#endif

  virtual bool is_scavengable(const void* addr) {
    return is_in_young((oop)addr);
  }

  // Iteration functions.
  void oop_iterate_no_header(OopClosure* cl);
  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl);
  Space* space_containing(const void* addr) const;

  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block. The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block. (Blocks may be of different sizes.) Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr". We say "blocks" instead of "object" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap. Assumes (and verifies in non-product
  // builds) that addr is in the allocated part of the heap and is
  // the start of a chunk.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object. Assumes (and verifies in non-product
  // builds) that addr is in the allocated part of the heap and is
  // the start of a chunk.
  virtual bool block_is_obj(const HeapWord* addr) const;

  // Section on TLAB's.
  virtual bool supports_tlab_allocation() const;
  virtual size_t tlab_capacity(Thread* thr) const;
  virtual size_t tlab_used(Thread* thr) const;
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
  virtual HeapWord* allocate_new_tlab(size_t size);
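  // Illustrative sketch (an assumption, not code from this header): the block
  // interface above is enough to walk a range of the heap block by block.
  // "walk_blocks" and its bounds are hypothetical.
  //
  //   void walk_blocks(GenCollectedHeap* gch, HeapWord* bottom, HeapWord* top) {
  //     for (HeapWord* cur = bottom; cur < top; cur += gch->block_size(cur)) {
  //       if (gch->block_is_obj(cur)) {
  //         // cur is the start of a Java object; a non-object block would be
  //         // e.g. a free chunk in a free-list-based space.
  //       }
  //     }
  //   }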
  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return UseConcMarkSweepGC;
  }

  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein. This applies to DefNew+Tenured and ParNew+CMS
  // only and may need to be re-examined in case other
  // kinds of collectors are implemented in the future.
  virtual bool can_elide_initializing_store_barrier(oop new_obj) {
    return is_in_young(new_obj);
  }

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space. The
  // requestor promises to allocate no more than "max_alloc_words" in any
  // older generation (via promotion say.) Any blocks of space that can
  // be provided are returned as a list of ScratchBlocks, sorted by
  // decreasing size.
  ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
  // Allow each generation to reset any scratch space that it has
  // contributed as it needs.
  void release_scratch();

  // Ensure parsability: override
  virtual void ensure_parsability(bool retire_tlabs);

  // Time in ms since the longest time a collector ran in
  // any generation.
  virtual jlong millis_since_last_gc();

  // Total number of full collections completed.
  unsigned int total_full_collections_completed() {
    assert(_full_collections_completed <= _total_full_collections,
           "Can't complete more collections than were started");
    return _full_collections_completed;
  }

  // Update above counter, as appropriate, at the end of a stop-world GC cycle
  unsigned int update_full_collections_completed();
  // Update above counter, as appropriate, at the end of a concurrent GC cycle
  unsigned int update_full_collections_completed(unsigned int count);

  // Update "time of last gc" for all generations to "now".
  void update_time_of_last_gc(jlong now) {
    _young_gen->update_time_of_last_gc(now);
    _old_gen->update_time_of_last_gc(now);
  }

  // Update the gc statistics for each generation.
  void update_gc_stats(Generation* current_generation, bool full) {
    _old_gen->update_gc_stats(current_generation, full);
  }

  bool no_gc_in_progress() { return !is_gc_active(); }

  // Override.
  void prepare_for_verify();

  // Override.
  void verify(bool silent, VerifyOption option);

  // Override.
  virtual void print_on(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;
  virtual void print_on_error(outputStream* st) const;

  // PrintGC, PrintGCDetails support
  void print_heap_change(size_t prev_used) const;

  // The functions below are helper functions that a subclass of
  // "CollectedHeap" can use in the implementation of its virtual
  // functions.

  class GenClosure : public StackObj {
   public:
    virtual void do_generation(Generation* gen) = 0;
  };
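  // Illustrative sketch (an assumption, not code from this header): a minimal
  // GenClosure, applied to both generations via generation_iterate(), which is
  // declared next. "CountingGenClosure" is hypothetical.
  //
  //   class CountingGenClosure : public GenCollectedHeap::GenClosure {
  //     size_t _used;
  //    public:
  //     CountingGenClosure() : _used(0) {}
  //     virtual void do_generation(Generation* gen) { _used += gen->used(); }
  //     size_t used() const { return _used; }
  //   };
  //
  //   CountingGenClosure cl;
  //   GenCollectedHeap::heap()->generation_iterate(&cl, /* old_to_young */ true);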
  // Apply "cl.do_generation" to all generations in the heap;
  // "old_to_young" determines the order.
  void generation_iterate(GenClosure* cl, bool old_to_young);

  // Return "true" if all generations have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // This function returns the CardTableRS object that allows us to scan
  // generations in a fully generational heap.
  CardTableRS* rem_set() { return _rem_set; }

  // Convenience function to be used in situations where the heap type can be
  // asserted to be this type.
  static GenCollectedHeap* heap();

  // Invoke the "do_oop" method of one of the closures "not_older_gens"
  // or "older_gens" on root locations for the generations depending on
  // the type. (The "older_gens" closure is used for scanning references
  // from older generations; "not_older_gens" is used everywhere else.)
  // If "young_gen_as_roots" is false, younger generations are
  // not scanned as roots; in this case, the caller must be arranging to
  // scan the younger generations itself. (For example, a generation might
  // explicitly mark reachable objects in younger generations, to avoid
  // excess storage retention.)
  // The "so" argument determines which of the roots
  // the closure is applied to:
  // "SO_None" does none;
  enum ScanningOption {
    SO_None                =  0x0,
    SO_AllCodeCache        =  0x8,
    SO_ScavengeCodeCache   = 0x10
  };

private:
  void process_roots(StrongRootsScope* scope,
                     ScanningOption so,
                     OopClosure* strong_roots,
                     OopClosure* weak_roots,
                     CLDClosure* strong_cld_closure,
                     CLDClosure* weak_cld_closure,
                     CodeBlobClosure* code_roots);

public:
  static const bool StrongAndWeakRoots = false;
  static const bool StrongRootsOnly    = true;

  void gen_process_roots(StrongRootsScope* scope,
                         GenerationType type,
                         bool young_gen_as_roots,
                         ScanningOption so,
                         bool only_strong_roots,
                         OopsInGenClosure* not_older_gens,
                         OopsInGenClosure* older_gens,
                         CLDClosure* cld_closure);

  // Apply "root_closure" to all the weak roots of the system.
  // These include JNI weak roots, string table,
  // and referents of reachable weak refs.
  void gen_process_weak_roots(OopClosure* root_closure);

  // Set the saved marks of generations, if that makes sense.
  // In particular, if any generation might iterate over the oops
  // in other generations, it should call this method.
  void save_marks();

  // Apply "cur->do_oop" or "older->do_oop" to all the oops in objects
  // allocated since the last call to save_marks in generations at or above
  // "start_gen". The "cur" closure is applied to references in the
  // generation at "start_gen", and the "older" closure to older generations.
#define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix)    \
  void oop_since_save_marks_iterate(GenerationType start_gen,           \
                                    OopClosureType* cur,                \
                                    OopClosureType* older);

  ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL

  // Returns "true" iff no allocations have occurred since the last
  // call to "save_marks".
  bool no_allocs_since_save_marks();
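  // Illustrative sketch (an assumption, not code from this header): the
  // save-marks protocol. A collector records the current allocation point in
  // each generation, lets promotion/allocation proceed, and then needs to
  // revisit only objects created after the marks were saved.
  //
  //   gch->save_marks();
  //   // ... promotions allocate into the generations ...
  //   while (!gch->no_allocs_since_save_marks()) {
  //     // Re-scan only the newly allocated objects; one of the
  //     // oop_since_save_marks_iterate() variants declared above does this,
  //     // advancing the saved marks as it goes.
  //   }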
  // Returns true if an incremental collection is likely to fail.
  // We optionally consult the young gen, if asked to do so;
  // otherwise we base our answer on whether the previous incremental
  // collection attempt failed with no corrective action as of yet.
  bool incremental_collection_will_fail(bool consult_young) {
    // The first disjunct remembers if an incremental collection failed, even
    // when we thought (second disjunct) that it would not.
    return incremental_collection_failed() ||
           (consult_young && !_young_gen->collection_attempt_is_safe());
  }

  // If a generation bails out of an incremental collection,
  // it sets this flag.
  bool incremental_collection_failed() const {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // Promotion of obj into old_gen failed. Try to promote obj to higher
  // gens in ascending order; return the new location of obj if successful.
  // Otherwise, try expand-and-allocate for obj in both the young and old
  // generation; return the new location of obj if successful. Otherwise, return NULL.
  oop handle_failed_promotion(Generation* old_gen,
                              oop obj,
                              size_t obj_size);

private:
  // Accessor for memory state verification support
  NOT_PRODUCT(
    static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
  )

  // Override
  void check_for_non_bad_heap_word_value(HeapWord* addr,
                                         size_t size) PRODUCT_RETURN;

  // For use by mark-sweep. As implemented, mark-sweep-compact is global
  // in an essential way: compaction is performed across generations, by
  // iterating over spaces.
  void prepare_for_compaction();

  // Perform a full collection of the generations up to and including max_generation.
  // This is the low level interface used by the public versions of
  // collect() and collect_locked(). Caller holds the Heap_lock on entry.
  void collect_locked(GCCause::Cause cause, GenerationType max_generation);

  // Returns success or failure.
  bool create_cms_collector();

  // In support of ExplicitGCInvokesConcurrent functionality
  bool should_do_concurrent_full_gc(GCCause::Cause cause);
  void collect_mostly_concurrent(GCCause::Cause cause);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

protected:
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

public:
  void stop();
};

#endif // SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP