1 /* 2 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP 26 #define SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP 27 28 #include "gc_implementation/shared/adaptiveSizePolicy.hpp" 29 #include "memory/collectorPolicy.hpp" 30 #include "memory/generation.hpp" 31 #include "memory/sharedHeap.hpp" 32 33 class SubTasksDone; 34 35 // A "GenCollectedHeap" is a SharedHeap that uses generational 36 // collection. It is represented with a sequence of Generation's. 37 class GenCollectedHeap : public SharedHeap { 38 friend class GenCollectorPolicy; 39 friend class Generation; 40 friend class DefNewGeneration; 41 friend class TenuredGeneration; 42 friend class ConcurrentMarkSweepGeneration; 43 friend class CMSCollector; 44 friend class GenMarkSweep; 45 friend class VM_GenCollectForAllocation; 46 friend class VM_GenCollectFull; 47 friend class VM_GenCollectFullConcurrent; 48 friend class VM_GC_HeapInspection; 49 friend class VM_HeapDumper; 50 friend class HeapInspection; 51 friend class GCCauseSetter; 52 friend class VMStructs; 53 public: 54 enum SomeConstants { 55 max_gens = 10 56 }; 57 58 friend class VM_PopulateDumpSharedSpace; 59 60 protected: 61 // Fields: 62 static GenCollectedHeap* _gch; 63 64 private: 65 int _n_gens; 66 Generation* _gens[max_gens]; 67 GenerationSpec** _gen_specs; 68 69 // The generational collector policy. 70 GenCollectorPolicy* _gen_policy; 71 72 // Indicates that the most recent previous incremental collection failed. 73 // The flag is cleared when an action is taken that might clear the 74 // condition that caused that incremental collection to fail. 75 bool _incremental_collection_failed; 76 77 // In support of ExplicitGCInvokesConcurrent functionality 78 unsigned int _full_collections_completed; 79 80 // Data structure for claiming the (potentially) parallel tasks in 81 // (gen-specific) strong roots processing. 82 SubTasksDone* _gen_process_strong_tasks; 83 SubTasksDone* gen_process_strong_tasks() { return _gen_process_strong_tasks; } 84 85 // In block contents verification, the number of header words to skip 86 NOT_PRODUCT(static size_t _skip_header_HeapWords;) 87 88 protected: 89 // Directs each generation up to and including "collectedGen" to recompute 90 // its desired size. 91 void compute_new_generation_sizes(int collectedGen); 92 93 // Helper functions for allocation 94 HeapWord* attempt_allocation(size_t size, 95 bool is_tlab, 96 bool first_only); 97 98 // Helper function for two callbacks below. 99 // Considers collection of the first max_level+1 generations. 100 void do_collection(bool full, 101 bool clear_all_soft_refs, 102 size_t size, 103 bool is_tlab, 104 int max_level); 105 106 // Callback from VM_GenCollectForAllocation operation. 107 // This function does everything necessary/possible to satisfy an 108 // allocation request that failed in the youngest generation that should 109 // have handled it (including collection, expansion, etc.) 110 HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab); 111 112 // Callback from VM_GenCollectFull operation. 113 // Perform a full collection of the first max_level+1 generations. 114 virtual void do_full_collection(bool clear_all_soft_refs); 115 void do_full_collection(bool clear_all_soft_refs, int max_level); 116 117 // Does the "cause" of GC indicate that 118 // we absolutely __must__ clear soft refs? 119 bool must_clear_all_soft_refs(); 120 121 public: 122 GenCollectedHeap(GenCollectorPolicy *policy); 123 124 GCStats* gc_stats(int level) const; 125 126 // Returns JNI_OK on success 127 virtual jint initialize(); 128 char* allocate(size_t alignment, 129 size_t* _total_reserved, int* _n_covered_regions, 130 ReservedSpace* heap_rs); 131 132 // Does operations required after initialization has been done. 133 void post_initialize(); 134 135 // Initialize ("weak") refs processing support 136 virtual void ref_processing_init(); 137 138 virtual CollectedHeap::Name kind() const { 139 return CollectedHeap::GenCollectedHeap; 140 } 141 142 // The generational collector policy. 143 GenCollectorPolicy* gen_policy() const { return _gen_policy; } 144 virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) gen_policy(); } 145 146 // Adaptive size policy 147 virtual AdaptiveSizePolicy* size_policy() { 148 return gen_policy()->size_policy(); 149 } 150 151 size_t capacity() const; 152 size_t used() const; 153 154 // Save the "used_region" for generations level and lower. 155 void save_used_regions(int level); 156 157 size_t max_capacity() const; 158 159 HeapWord* mem_allocate(size_t size, 160 bool* gc_overhead_limit_was_exceeded); 161 162 // We may support a shared contiguous allocation area, if the youngest 163 // generation does. 164 bool supports_inline_contig_alloc() const; 165 HeapWord** top_addr() const; 166 HeapWord** end_addr() const; 167 168 // Return an estimate of the maximum allocation that could be performed 169 // without triggering any collection activity. In a generational 170 // collector, for example, this is probably the largest allocation that 171 // could be supported in the youngest generation. It is "unsafe" because 172 // no locks are taken; the result should be treated as an approximation, 173 // not a guarantee. 174 size_t unsafe_max_alloc(); 175 176 // Does this heap support heap inspection? (+PrintClassHistogram) 177 virtual bool supports_heap_inspection() const { return true; } 178 179 // Perform a full collection of the heap; intended for use in implementing 180 // "System.gc". This implies as full a collection as the CollectedHeap 181 // supports. Caller does not hold the Heap_lock on entry. 182 void collect(GCCause::Cause cause); 183 184 // The same as above but assume that the caller holds the Heap_lock. 185 void collect_locked(GCCause::Cause cause); 186 187 // Perform a full collection of the first max_level+1 generations. 188 // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry. 189 void collect(GCCause::Cause cause, int max_level); 190 191 // Returns "TRUE" iff "p" points into the committed areas of the heap. 192 // The methods is_in(), is_in_closed_subset() and is_in_youngest() may 193 // be expensive to compute in general, so, to prevent 194 // their inadvertent use in product jvm's, we restrict their use to 195 // assertion checking or verification only. 196 bool is_in(const void* p) const; 197 198 // override 199 bool is_in_closed_subset(const void* p) const { 200 if (UseConcMarkSweepGC) { 201 return is_in_reserved(p); 202 } else { 203 return is_in(p); 204 } 205 } 206 207 // Returns true if the reference is to an object in the reserved space 208 // for the young generation. 209 // Assumes the the young gen address range is less than that of the old gen. 210 bool is_in_young(oop p); 211 212 #ifdef ASSERT 213 virtual bool is_in_partial_collection(const void* p); 214 #endif 215 216 virtual bool is_scavengable(const void* addr) { 217 return is_in_young((oop)addr); 218 } 219 220 // Iteration functions. 221 void oop_iterate(ExtendedOopClosure* cl); 222 void oop_iterate(MemRegion mr, ExtendedOopClosure* cl); 223 void object_iterate(ObjectClosure* cl); 224 void safe_object_iterate(ObjectClosure* cl); 225 Space* space_containing(const void* addr) const; 226 227 // A CollectedHeap is divided into a dense sequence of "blocks"; that is, 228 // each address in the (reserved) heap is a member of exactly 229 // one block. The defining characteristic of a block is that it is 230 // possible to find its size, and thus to progress forward to the next 231 // block. (Blocks may be of different sizes.) Thus, blocks may 232 // represent Java objects, or they might be free blocks in a 233 // free-list-based heap (or subheap), as long as the two kinds are 234 // distinguishable and the size of each is determinable. 235 236 // Returns the address of the start of the "block" that contains the 237 // address "addr". We say "blocks" instead of "object" since some heaps 238 // may not pack objects densely; a chunk may either be an object or a 239 // non-object. 240 virtual HeapWord* block_start(const void* addr) const; 241 242 // Requires "addr" to be the start of a chunk, and returns its size. 243 // "addr + size" is required to be the start of a new chunk, or the end 244 // of the active area of the heap. Assumes (and verifies in non-product 245 // builds) that addr is in the allocated part of the heap and is 246 // the start of a chunk. 247 virtual size_t block_size(const HeapWord* addr) const; 248 249 // Requires "addr" to be the start of a block, and returns "TRUE" iff 250 // the block is an object. Assumes (and verifies in non-product 251 // builds) that addr is in the allocated part of the heap and is 252 // the start of a chunk. 253 virtual bool block_is_obj(const HeapWord* addr) const; 254 255 // Section on TLAB's. 256 virtual bool supports_tlab_allocation() const; 257 virtual size_t tlab_capacity(Thread* thr) const; 258 virtual size_t unsafe_max_tlab_alloc(Thread* thr) const; 259 virtual HeapWord* allocate_new_tlab(size_t size); 260 261 // Can a compiler initialize a new object without store barriers? 262 // This permission only extends from the creation of a new object 263 // via a TLAB up to the first subsequent safepoint. 264 virtual bool can_elide_tlab_store_barriers() const { 265 return true; 266 } 267 268 virtual bool card_mark_must_follow_store() const { 269 return UseConcMarkSweepGC; 270 } 271 272 // We don't need barriers for stores to objects in the 273 // young gen and, a fortiori, for initializing stores to 274 // objects therein. This applies to {DefNew,ParNew}+{Tenured,CMS} 275 // only and may need to be re-examined in case other 276 // kinds of collectors are implemented in the future. 277 virtual bool can_elide_initializing_store_barrier(oop new_obj) { 278 // We wanted to assert that:- 279 // assert(UseParNewGC || UseSerialGC || UseConcMarkSweepGC, 280 // "Check can_elide_initializing_store_barrier() for this collector"); 281 // but unfortunately the flag UseSerialGC need not necessarily always 282 // be set when DefNew+Tenured are being used. 283 return is_in_young(new_obj); 284 } 285 286 // The "requestor" generation is performing some garbage collection 287 // action for which it would be useful to have scratch space. The 288 // requestor promises to allocate no more than "max_alloc_words" in any 289 // older generation (via promotion say.) Any blocks of space that can 290 // be provided are returned as a list of ScratchBlocks, sorted by 291 // decreasing size. 292 ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words); 293 // Allow each generation to reset any scratch space that it has 294 // contributed as it needs. 295 void release_scratch(); 296 297 // Ensure parsability: override 298 virtual void ensure_parsability(bool retire_tlabs); 299 300 // Time in ms since the longest time a collector ran in 301 // in any generation. 302 virtual jlong millis_since_last_gc(); 303 304 // Total number of full collections completed. 305 unsigned int total_full_collections_completed() { 306 assert(_full_collections_completed <= _total_full_collections, 307 "Can't complete more collections than were started"); 308 return _full_collections_completed; 309 } 310 311 // Update above counter, as appropriate, at the end of a stop-world GC cycle 312 unsigned int update_full_collections_completed(); 313 // Update above counter, as appropriate, at the end of a concurrent GC cycle 314 unsigned int update_full_collections_completed(unsigned int count); 315 316 // Update "time of last gc" for all constituent generations 317 // to "now". 318 void update_time_of_last_gc(jlong now) { 319 for (int i = 0; i < _n_gens; i++) { 320 _gens[i]->update_time_of_last_gc(now); 321 } 322 } 323 324 // Update the gc statistics for each generation. 325 // "level" is the level of the lastest collection 326 void update_gc_stats(int current_level, bool full) { 327 for (int i = 0; i < _n_gens; i++) { 328 _gens[i]->update_gc_stats(current_level, full); 329 } 330 } 331 332 // Override. 333 bool no_gc_in_progress() { return !is_gc_active(); } 334 335 // Override. 336 void prepare_for_verify(); 337 338 // Override. 339 void verify(bool silent, VerifyOption option); 340 341 // Override. 342 virtual void print_on(outputStream* st) const; 343 virtual void print_gc_threads_on(outputStream* st) const; 344 virtual void gc_threads_do(ThreadClosure* tc) const; 345 virtual void print_tracing_info() const; 346 virtual void print_on_error(outputStream* st) const; 347 348 // PrintGC, PrintGCDetails support 349 void print_heap_change(size_t prev_used) const; 350 351 // The functions below are helper functions that a subclass of 352 // "CollectedHeap" can use in the implementation of its virtual 353 // functions. 354 355 class GenClosure : public StackObj { 356 public: 357 virtual void do_generation(Generation* gen) = 0; 358 }; 359 360 // Apply "cl.do_generation" to all generations in the heap 361 // If "old_to_young" determines the order. 362 void generation_iterate(GenClosure* cl, bool old_to_young); 363 364 void space_iterate(SpaceClosure* cl); 365 366 // Return "true" if all generations have reached the 367 // maximal committed limit that they can reach, without a garbage 368 // collection. 369 virtual bool is_maximal_no_gc() const; 370 371 // Return the generation before "gen", or else NULL. 372 Generation* prev_gen(Generation* gen) const { 373 int l = gen->level(); 374 if (l == 0) return NULL; 375 else return _gens[l-1]; 376 } 377 378 // Return the generation after "gen", or else NULL. 379 Generation* next_gen(Generation* gen) const { 380 int l = gen->level() + 1; 381 if (l == _n_gens) return NULL; 382 else return _gens[l]; 383 } 384 385 Generation* get_gen(int i) const { 386 if (i >= 0 && i < _n_gens) 387 return _gens[i]; 388 else 389 return NULL; 390 } 391 392 int n_gens() const { 393 assert(_n_gens == gen_policy()->number_of_generations(), "Sanity"); 394 return _n_gens; 395 } 396 397 // Convenience function to be used in situations where the heap type can be 398 // asserted to be this type. 399 static GenCollectedHeap* heap(); 400 401 void set_par_threads(uint t); 402 403 // Invoke the "do_oop" method of one of the closures "not_older_gens" 404 // or "older_gens" on root locations for the generation at 405 // "level". (The "older_gens" closure is used for scanning references 406 // from older generations; "not_older_gens" is used everywhere else.) 407 // If "younger_gens_as_roots" is false, younger generations are 408 // not scanned as roots; in this case, the caller must be arranging to 409 // scan the younger generations itself. (For example, a generation might 410 // explicitly mark reachable objects in younger generations, to avoid 411 // excess storage retention.) 412 // The "so" argument determines which of the roots 413 // the closure is applied to: 414 // "SO_None" does none; 415 // "SO_AllClasses" applies the closure to all entries in the SystemDictionary; 416 // "SO_SystemClasses" to all the "system" classes and loaders; 417 // "SO_Strings" applies the closure to all entries in the StringTable. 418 void gen_process_strong_roots(int level, 419 bool younger_gens_as_roots, 420 // The remaining arguments are in an order 421 // consistent with SharedHeap::process_strong_roots: 422 bool activate_scope, 423 bool is_scavenging, 424 SharedHeap::ScanningOption so, 425 OopsInGenClosure* not_older_gens, 426 bool do_code_roots, 427 OopsInGenClosure* older_gens, 428 KlassClosure* klass_closure); 429 430 // Apply "blk" to all the weak roots of the system. These include 431 // JNI weak roots, the code cache, system dictionary, symbol table, 432 // string table, and referents of reachable weak refs. 433 void gen_process_weak_roots(OopClosure* root_closure, 434 CodeBlobClosure* code_roots); 435 436 // Set the saved marks of generations, if that makes sense. 437 // In particular, if any generation might iterate over the oops 438 // in other generations, it should call this method. 439 void save_marks(); 440 441 // Apply "cur->do_oop" or "older->do_oop" to all the oops in objects 442 // allocated since the last call to save_marks in generations at or above 443 // "level". The "cur" closure is 444 // applied to references in the generation at "level", and the "older" 445 // closure to older generations. 446 #define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix) \ 447 void oop_since_save_marks_iterate(int level, \ 448 OopClosureType* cur, \ 449 OopClosureType* older); 450 451 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL) 452 453 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL 454 455 // Returns "true" iff no allocations have occurred in any generation at 456 // "level" or above since the last 457 // call to "save_marks". 458 bool no_allocs_since_save_marks(int level); 459 460 // Returns true if an incremental collection is likely to fail. 461 // We optionally consult the young gen, if asked to do so; 462 // otherwise we base our answer on whether the previous incremental 463 // collection attempt failed with no corrective action as of yet. 464 bool incremental_collection_will_fail(bool consult_young) { 465 // Assumes a 2-generation system; the first disjunct remembers if an 466 // incremental collection failed, even when we thought (second disjunct) 467 // that it would not. 468 assert(heap()->collector_policy()->is_two_generation_policy(), 469 "the following definition may not be suitable for an n(>2)-generation system"); 470 return incremental_collection_failed() || 471 (consult_young && !get_gen(0)->collection_attempt_is_safe()); 472 } 473 474 // If a generation bails out of an incremental collection, 475 // it sets this flag. 476 bool incremental_collection_failed() const { 477 return _incremental_collection_failed; 478 } 479 void set_incremental_collection_failed() { 480 _incremental_collection_failed = true; 481 } 482 void clear_incremental_collection_failed() { 483 _incremental_collection_failed = false; 484 } 485 486 // Promotion of obj into gen failed. Try to promote obj to higher 487 // gens in ascending order; return the new location of obj if successful. 488 // Otherwise, try expand-and-allocate for obj in each generation starting at 489 // gen; return the new location of obj if successful. Otherwise, return NULL. 490 oop handle_failed_promotion(Generation* gen, 491 oop obj, 492 size_t obj_size); 493 494 private: 495 // Accessor for memory state verification support 496 NOT_PRODUCT( 497 static size_t skip_header_HeapWords() { return _skip_header_HeapWords; } 498 ) 499 500 // Override 501 void check_for_non_bad_heap_word_value(HeapWord* addr, 502 size_t size) PRODUCT_RETURN; 503 504 // For use by mark-sweep. As implemented, mark-sweep-compact is global 505 // in an essential way: compaction is performed across generations, by 506 // iterating over spaces. 507 void prepare_for_compaction(); 508 509 // Perform a full collection of the first max_level+1 generations. 510 // This is the low level interface used by the public versions of 511 // collect() and collect_locked(). Caller holds the Heap_lock on entry. 512 void collect_locked(GCCause::Cause cause, int max_level); 513 514 // Returns success or failure. 515 bool create_cms_collector(); 516 517 // In support of ExplicitGCInvokesConcurrent functionality 518 bool should_do_concurrent_full_gc(GCCause::Cause cause); 519 void collect_mostly_concurrent(GCCause::Cause cause); 520 521 // Save the tops of the spaces in all generations 522 void record_gen_tops_before_GC() PRODUCT_RETURN; 523 524 protected: 525 virtual void gc_prologue(bool full); 526 virtual void gc_epilogue(bool full); 527 }; 528 529 #endif // SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP