/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP
#define SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP

#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/generation.hpp"
#include "memory/sharedHeap.hpp"

class SubTasksDone;

// A "GenCollectedHeap" is a SharedHeap that uses generational
// collection. It has two generations, young and old.
class GenCollectedHeap : public SharedHeap {
  friend class GenCollectorPolicy;
  friend class Generation;
  friend class DefNewGeneration;
  friend class TenuredGeneration;
  friend class ConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  friend class GenMarkSweep;
  friend class VM_GenCollectForAllocation;
  friend class VM_GenCollectFull;
  friend class VM_GenCollectFullConcurrent;
  friend class VM_GC_HeapInspection;
  friend class VM_HeapDumper;
  friend class HeapInspection;
  friend class GCCauseSetter;
  friend class VMStructs;
public:
  friend class VM_PopulateDumpSharedSpace;

protected:
  // Fields:
  static GenCollectedHeap* _gch;

private:
  Generation* _young_gen;
  Generation* _old_gen;

  // The generational collector policy.
  GenCollectorPolicy* _gen_policy;

  // Indicates that the most recent previous incremental collection failed.
  // The flag is cleared when an action is taken that might clear the
  // condition that caused that incremental collection to fail.
  bool _incremental_collection_failed;

  // In support of ExplicitGCInvokesConcurrent functionality
  unsigned int _full_collections_completed;

  // Data structure for claiming the (potentially) parallel tasks in
  // (gen-specific) roots processing.
  SubTasksDone* _gen_process_roots_tasks;
  SubTasksDone* gen_process_roots_tasks() { return _gen_process_roots_tasks; }

  void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
                          bool run_verification, bool clear_soft_refs);

  // In block contents verification, the number of header words to skip
  NOT_PRODUCT(static size_t _skip_header_HeapWords;)

protected:
  // Helper functions for allocation
  HeapWord* attempt_allocation(size_t size,
                               bool is_tlab,
                               bool first_only);
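
  // Illustrative sketch (not a normative call sequence): a caller such as
  // the collector policy's allocation path can first restrict allocation to
  // the young generation and then retry against all generations:
  //
  //   HeapWord* result = attempt_allocation(size, is_tlab, true  /* first_only */);
  //   if (result == NULL) {
  //     result = attempt_allocation(size, is_tlab, false /* first_only */);
  //   }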

  // Helper function for two callbacks below.
  // Considers collection of the generations up to and including max_generation.
  void do_collection(bool full,
                     bool clear_all_soft_refs,
                     size_t size,
                     bool is_tlab,
                     Generation::Type max_generation);

  // Callback from VM_GenCollectForAllocation operation.
  // This function does everything necessary/possible to satisfy an
  // allocation request that failed in the youngest generation that should
  // have handled it (including collection, expansion, etc.)
  HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);

  // Callback from VM_GenCollectFull operation.
  // Perform a full collection of the generations up to and including max_gen.
  virtual void do_full_collection(bool clear_all_soft_refs);
  void do_full_collection(bool clear_all_soft_refs, Generation::Type max_gen);

  // Does the "cause" of GC indicate that
  // we absolutely __must__ clear soft refs?
  bool must_clear_all_soft_refs();

public:
  GenCollectedHeap(GenCollectorPolicy *policy);

  GCStats* gc_stats(Generation* gen) const;

  // Returns JNI_OK on success
  virtual jint initialize();

  char* allocate(size_t alignment,
                 size_t* _total_reserved, int* _n_covered_regions,
                 ReservedSpace* heap_rs);

  // Performs operations required after initialization has been done.
  void post_initialize();

  // Initialize ("weak") refs processing support
  virtual void ref_processing_init();

  virtual CollectedHeap::Name kind() const {
    return CollectedHeap::GenCollectedHeap;
  }

  Generation* young_gen() const { return _young_gen; }
  Generation* old_gen() const { return _old_gen; }

  // The generational collector policy.
  GenCollectorPolicy* gen_policy() const { return _gen_policy; }

  virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) gen_policy(); }

  // Adaptive size policy
  virtual AdaptiveSizePolicy* size_policy() {
    return gen_policy()->size_policy();
  }

  // Return the (conservative) maximum heap alignment
  static size_t conservative_max_heap_alignment() {
    return Generation::GenGrain;
  }

  size_t capacity() const;
  size_t used() const;

  // Save the "used_region" for both generations.
  void save_used_regions();

  size_t max_capacity() const;

  HeapWord* mem_allocate(size_t size,
                         bool* gc_overhead_limit_was_exceeded);

  // We may support a shared contiguous allocation area, if the youngest
  // generation does.
  bool supports_inline_contig_alloc() const;
  HeapWord** top_addr() const;
  HeapWord** end_addr() const;

  // Does this heap support heap inspection? (+PrintClassHistogram)
  virtual bool supports_heap_inspection() const { return true; }

  // Perform a full collection of the heap; intended for use in implementing
  // "System.gc". This implies as full a collection as the CollectedHeap
  // supports. Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause);

  // The same as above, but assumes that the caller holds the Heap_lock.
  void collect_locked(GCCause::Cause cause);

  // Perform a full collection of generations up to and including max_gen.
  // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause, Generation::Type max_gen);
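
  // Illustrative usage sketch (not prescribed by this header): an explicit
  // GC request such as System.gc() ultimately arrives here roughly as:
  //
  //   GenCollectedHeap* gch = GenCollectedHeap::heap();
  //   gch->collect(GCCause::_java_lang_system_gc);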

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  // The methods is_in(), is_in_closed_subset() and is_in_young() may
  // be expensive to compute in general, so, to prevent
  // their inadvertent use in product jvm's, we restrict their use to
  // assertion checking or verification only.
  bool is_in(const void* p) const;

  // override
  bool is_in_closed_subset(const void* p) const {
    if (UseConcMarkSweepGC) {
      return is_in_reserved(p);
    } else {
      return is_in(p);
    }
  }

  // Returns true if the reference is to an object in the reserved space
  // for the young generation.
  // Assumes that the young gen address range is less than that of the old gen.
  bool is_in_young(oop p);

#ifdef ASSERT
  virtual bool is_in_partial_collection(const void* p);
#endif

  virtual bool is_scavengable(const void* addr) {
    return is_in_young((oop)addr);
  }

  // Iteration functions.
  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl);
  Space* space_containing(const void* addr) const;

  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block. The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block. (Blocks may be of different sizes.) Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr". We say "blocks" instead of "objects" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap. Assumes (and verifies in non-product
  // builds) that addr is in the allocated part of the heap and is
  // the start of a chunk.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object. Assumes (and verifies in non-product
  // builds) that addr is in the allocated part of the heap and is
  // the start of a chunk.
  virtual bool block_is_obj(const HeapWord* addr) const;
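
  // Taken together, these three methods support a block-by-block walk of a
  // committed region. A sketch, assuming [bottom, top) is the active part
  // of one space and visit_obj is a caller-supplied function:
  //
  //   HeapWord* cur = bottom;
  //   while (cur < top) {
  //     if (block_is_obj(cur)) {
  //       visit_obj(oop(cur));   // the block is a Java object
  //     }                        // else: a free (non-object) block
  //     cur += block_size(cur);  // advance to the start of the next block
  //   }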

  // Section on TLAB's.
  virtual bool supports_tlab_allocation() const;
  virtual size_t tlab_capacity(Thread* thr) const;
  virtual size_t tlab_used(Thread* thr) const;
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
  virtual HeapWord* allocate_new_tlab(size_t size);

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return UseConcMarkSweepGC;
  }

  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein. This applies to {DefNew,ParNew}+{Tenured,CMS}
  // only and may need to be re-examined in case other
  // kinds of collectors are implemented in the future.
  virtual bool can_elide_initializing_store_barrier(oop new_obj) {
    // We wanted to assert that:
    // assert(UseParNewGC || UseSerialGC || UseConcMarkSweepGC,
    //        "Check can_elide_initializing_store_barrier() for this collector");
    // but unfortunately the flag UseSerialGC need not necessarily always
    // be set when DefNew+Tenured are being used.
    return is_in_young(new_obj);
  }

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space. The
  // requestor promises to allocate no more than "max_alloc_words" in any
  // older generation (via promotion, say). Any blocks of space that can
  // be provided are returned as a list of ScratchBlocks, sorted by
  // decreasing size.
  ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
  // Allow each generation to reset any scratch space that it has
  // contributed as it needs.
  void release_scratch();

  // Ensure parsability: override
  virtual void ensure_parsability(bool retire_tlabs);

  // Time in ms since the least recent GC over all generations.
  virtual jlong millis_since_last_gc();

  // Total number of full collections completed.
  unsigned int total_full_collections_completed() {
    assert(_full_collections_completed <= _total_full_collections,
           "Can't complete more collections than were started");
    return _full_collections_completed;
  }

  // Update above counter, as appropriate, at the end of a stop-world GC cycle
  unsigned int update_full_collections_completed();
  // Update above counter, as appropriate, at the end of a concurrent GC cycle
  unsigned int update_full_collections_completed(unsigned int count);

  // Update "time of last gc" for all generations to "now".
  void update_time_of_last_gc(jlong now) {
    _young_gen->update_time_of_last_gc(now);
    _old_gen->update_time_of_last_gc(now);
  }

  // Update the gc statistics for each generation.
  void update_gc_stats(Generation* current_generation, bool full) {
    _old_gen->update_gc_stats(current_generation, full);
  }

  // Override.
  bool no_gc_in_progress() { return !is_gc_active(); }

  // Override.
  void prepare_for_verify();

  // Override.
  void verify(bool silent, VerifyOption option);

  // Override.
  virtual void print_on(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;
  virtual void print_on_error(outputStream* st) const;

  // PrintGC, PrintGCDetails support
  void print_heap_change(size_t prev_used) const;

  // The functions below are helper functions that a subclass of
  // "CollectedHeap" can use in the implementation of its virtual
  // functions.

  class GenClosure : public StackObj {
   public:
    virtual void do_generation(Generation* gen) = 0;
  };

  // Apply "cl.do_generation" to all generations in the heap;
  // "old_to_young" determines the order.
  void generation_iterate(GenClosure* cl, bool old_to_young);
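
  // A minimal sketch of a GenClosure, assuming the usual HotSpot printing
  // helpers (gclog_or_tty, SIZE_FORMAT) are available; applied young-to-old:
  //
  //   class PrintGenCapacityClosure : public GenCollectedHeap::GenClosure {
  //    public:
  //     virtual void do_generation(Generation* gen) {
  //       gclog_or_tty->print_cr("capacity: " SIZE_FORMAT, gen->capacity());
  //     }
  //   };
  //
  //   PrintGenCapacityClosure blk;
  //   GenCollectedHeap::heap()->generation_iterate(&blk, false /* old_to_young */);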

  void space_iterate(SpaceClosure* cl);

  // Return "true" if all generations have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Convenience function to be used in situations where the heap type can be
  // asserted to be this type.
  static GenCollectedHeap* heap();

  void set_par_threads(uint t);

  // Invoke the "do_oop" method of one of the closures "not_older_gens"
  // or "older_gens" on root locations for the generations depending on
  // the type. (The "older_gens" closure is used for scanning references
  // from older generations; "not_older_gens" is used everywhere else.)
  // If "younger_gens_as_roots" is false, younger generations are
  // not scanned as roots; in this case, the caller must be arranging to
  // scan the younger generations itself. (For example, a generation might
  // explicitly mark reachable objects in younger generations, to avoid
  // excess storage retention.)
  // The "so" argument determines which of the roots
  // the closure is applied to:
  // "SO_None" does none;
private:
  void gen_process_roots(Generation::Type type,
                         bool younger_gens_as_roots,
                         bool activate_scope,
                         SharedHeap::ScanningOption so,
                         OopsInGenClosure* not_older_gens,
                         OopsInGenClosure* weak_roots,
                         OopsInGenClosure* older_gens,
                         CLDClosure* cld_closure,
                         CLDClosure* weak_cld_closure,
                         CodeBlobClosure* code_closure);

public:
  static const bool StrongAndWeakRoots = false;
  static const bool StrongRootsOnly = true;

  void gen_process_roots(Generation::Type type,
                         bool younger_gens_as_roots,
                         bool activate_scope,
                         SharedHeap::ScanningOption so,
                         bool only_strong_roots,
                         OopsInGenClosure* not_older_gens,
                         OopsInGenClosure* older_gens,
                         CLDClosure* cld_closure);

  // Apply "root_closure" to all the weak roots of the system.
  // These include JNI weak roots, string table,
  // and referents of reachable weak refs.
  void gen_process_weak_roots(OopClosure* root_closure);

  // Set the saved marks of generations, if that makes sense.
  // In particular, if any generation might iterate over the oops
  // in other generations, it should call this method.
  void save_marks();

  // Apply "cur->do_oop" or "older->do_oop" to all the oops in objects
  // allocated since the last call to save_marks in generations at or above
  // "start_gen". The "cur" closure is
  // applied to references in the generation at "start_gen", and the "older"
  // closure to older generations.
#define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix)    \
  void oop_since_save_marks_iterate(Generation::Type start_gen,         \
                                    OopClosureType* cur,                \
                                    OopClosureType* older);

  ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL
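
  // For illustration: each expansion of the macro above declares one
  // overload per listed closure type. For a hypothetical entry
  // (ScanClosure, _nv), the resulting declaration would read:
  //
  //   void oop_since_save_marks_iterate(Generation::Type start_gen,
  //                                     ScanClosure* cur,
  //                                     ScanClosure* older);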

  // Returns "true" iff no allocations have occurred since the last
  // call to "save_marks".
  bool no_allocs_since_save_marks(bool include_young);

  // Returns true if an incremental collection is likely to fail.
  // We optionally consult the young gen, if asked to do so;
  // otherwise we base our answer on whether the previous incremental
  // collection attempt failed with no corrective action as of yet.
  bool incremental_collection_will_fail(bool consult_young) {
    // The first disjunct remembers if an incremental collection failed, even
    // when we thought (second disjunct) that it would not.
    return incremental_collection_failed() ||
           (consult_young && !_young_gen->collection_attempt_is_safe());
  }

  // If a generation bails out of an incremental collection,
  // it sets this flag.
  bool incremental_collection_failed() const {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // Promotion of obj into gen failed. Try to promote obj to higher
  // gens in ascending order; return the new location of obj if successful.
  // Otherwise, try expand-and-allocate for obj in both the young and old
  // generation; return the new location of obj if successful. Otherwise, return NULL.
  oop handle_failed_promotion(Generation* old_gen,
                              oop obj,
                              size_t obj_size);

private:
  // Accessor for memory state verification support
  NOT_PRODUCT(
    static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
  )

  // Override
  void check_for_non_bad_heap_word_value(HeapWord* addr,
                                         size_t size) PRODUCT_RETURN;

  // For use by mark-sweep. As implemented, mark-sweep-compact is global
  // in an essential way: compaction is performed across generations, by
  // iterating over spaces.
  void prepare_for_compaction();

  // Perform a full collection of the generations up to and including max_gen.
  // This is the low level interface used by the public versions of
  // collect() and collect_locked(). Caller holds the Heap_lock on entry.
  void collect_locked(GCCause::Cause cause, Generation::Type max_gen);

  // Returns success or failure.
  bool create_cms_collector();

  // In support of ExplicitGCInvokesConcurrent functionality
  bool should_do_concurrent_full_gc(GCCause::Cause cause);
  void collect_mostly_concurrent(GCCause::Cause cause);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

protected:
  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);
};

#endif // SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP