#ifdef USE_PRAGMA_IDENT_HDR
#pragma ident "@(#)collectedHeap.hpp 1.58 07/09/07 10:56:50 JVM"
#endif
/*
 * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// A "CollectedHeap" is an implementation of a java heap for HotSpot.  This
// is an abstract class: there may be many different kinds of heaps.  This
// class defines the functions that a heap must implement, and contains
// infrastructure common to all heaps.

class BarrierSet;
class ThreadClosure;
class AdaptiveSizePolicy;
class Thread;

//
// CollectedHeap
//   SharedHeap
//     GenCollectedHeap
//     G1CollectedHeap
//   ParallelScavengeHeap
//
class CollectedHeap : public CHeapObj {
  friend class VMStructs;
  friend class IsGCActiveMark; // Block structured external access to _is_gc_active

#ifdef ASSERT
  static int _fire_out_of_memory_count;
#endif

  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;

 protected:
  MemRegion _reserved;
  BarrierSet* _barrier_set;
  bool _is_gc_active;
  unsigned int _total_collections;       // ... started
  unsigned int _total_full_collections;  // ... started
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)

  // Reason for current garbage collection.  Should be set to
  // a value reflecting no collection between collections.
  GCCause::Cause _gc_cause;
  GCCause::Cause _gc_lastcause;
  PerfStringVariable* _perf_gc_cause;
  PerfStringVariable* _perf_gc_lastcause;

  // Constructor
  CollectedHeap();

  // Create a new tlab
  virtual HeapWord* allocate_new_tlab(size_t size);

  // Fix up tlabs to make the heap well-formed again,
  // optionally retiring the tlabs.
  virtual void fill_all_tlabs(bool retire);

  // Accumulate statistics on all tlabs.
  virtual void accumulate_statistics_all_tlabs();

  // Reinitialize tlabs before resuming mutators.
  virtual void resize_all_tlabs();

  debug_only(static void check_for_valid_allocation_state();)

 protected:
  // Allocate from the current thread's TLAB, with broken-out slow path.
  inline static HeapWord* allocate_from_tlab(Thread* thread, size_t size);
  static HeapWord* allocate_from_tlab_slow(Thread* thread, size_t size);
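
  // Illustrative sketch (not part of the original interface): a typical
  // caller tries the inline TLAB fast path first and falls back to the
  // slow path, which may refill or retire the TLAB:
  //
  //   HeapWord* obj = allocate_from_tlab(thread, size);  // bump-pointer, no lock
  //   if (obj == NULL) {
  //     obj = allocate_from_tlab_slow(thread, size);     // refill TLAB or allocate in heap
  //   }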
  // Allocates an uninitialized block of the given size, or returns NULL if
  // this is impossible.
  inline static HeapWord* common_mem_allocate_noinit(size_t size, bool is_noref, TRAPS);

  // Like common_mem_allocate_noinit, but the block returned by a successful
  // allocation is guaranteed initialized to zeros.
  inline static HeapWord* common_mem_allocate_init(size_t size, bool is_noref, TRAPS);

  // Same as common_mem version, except memory is allocated in the permanent area
  // If there is no permanent area, revert to common_mem_allocate_noinit
  inline static HeapWord* common_permanent_mem_allocate_noinit(size_t size, TRAPS);

  // Same as common_mem version, except memory is allocated in the permanent area
  // If there is no permanent area, revert to common_mem_allocate_init
  inline static HeapWord* common_permanent_mem_allocate_init(size_t size, TRAPS);

  // Helper functions for (VM) allocation.
  inline static void post_allocation_setup_common(KlassHandle klass,
                                                  HeapWord* obj, size_t size);
  inline static void post_allocation_setup_no_klass_install(KlassHandle klass,
                                                            HeapWord* objPtr,
                                                            size_t size);

  inline static void post_allocation_setup_obj(KlassHandle klass,
                                               HeapWord* obj, size_t size);

  inline static void post_allocation_setup_array(KlassHandle klass,
                                                 HeapWord* obj, size_t size,
                                                 int length);

  // Clears an allocated object.
  inline static void init_obj(HeapWord* obj, size_t size);

  // Filler object utilities.
  static inline size_t filler_array_hdr_size();
  static inline size_t filler_array_min_size();
  static inline size_t filler_array_max_size();

  DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words);)

  // Fill with a single array; caller must ensure filler_array_min_size() <=
  // words <= filler_array_max_size().
  static inline void fill_with_array(HeapWord* start, size_t words);

  // Fill with a single object (either an int array or a java.lang.Object).
  static inline void fill_with_object_impl(HeapWord* start, size_t words);

  // Verification functions
  virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;

 public:
  enum Name {
    Abstract,
    SharedHeap,
    GenCollectedHeap,
    ParallelScavengeHeap,
    G1CollectedHeap
  };

  virtual CollectedHeap::Name kind() const { return CollectedHeap::Abstract; }

  /**
   * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
   * and JNI_OK on success.
   */
  virtual jint initialize() = 0;

  // In many heaps, there will be a need to perform some initialization activities
  // after the Universe is fully formed, but before general heap allocation is allowed.
  // This is the correct place to place such initialization methods.
  virtual void post_initialize() = 0;

  MemRegion reserved_region() const { return _reserved; }
  address base() const { return (address)reserved_region().start(); }

  // Future cleanup here. The following functions should specify bytes or
  // heapwords as part of their signature.
  virtual size_t capacity() const = 0;
  virtual size_t used() const = 0;
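
  // Illustrative sketch (not in the original header): given some
  // CollectedHeap* heap, callers commonly combine these accessors,
  // e.g. a simple occupancy check:
  //
  //   size_t used = heap->used();
  //   size_t cap  = heap->capacity();
  //   bool nearly_full = used > cap - cap / 10;  // more than 90% occupied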
  // Return "true" if the part of the heap that allocates Java
  // objects has reached the maximal committed limit that it can
  // reach, without a garbage collection.
  virtual bool is_maximal_no_gc() const = 0;

  virtual size_t permanent_capacity() const = 0;
  virtual size_t permanent_used() const = 0;

  // Support for java.lang.Runtime.maxMemory(): return the maximum amount of
  // memory that the vm could make available for storing 'normal' java objects.
  // This is based on the reserved address space, but should not include space
  // that the vm uses internally for bookkeeping or temporary storage (e.g.,
  // perm gen space or, in the case of the young gen, one of the survivor
  // spaces).
  virtual size_t max_capacity() const = 0;

  // Returns "TRUE" if "p" points into the reserved area of the heap.
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  bool is_in_reserved_or_null(const void* p) const {
    return p == NULL || is_in_reserved(p);
  }

  // Returns "TRUE" if "p" points to the head of an allocated object in the
  // heap.  Since this method can be expensive in general, we restrict its
  // use to assertion checking only.
  virtual bool is_in(const void* p) const = 0;

  bool is_in_or_null(const void* p) const {
    return p == NULL || is_in(p);
  }

  // Let's define some terms: a "closed" subset of a heap is one that
  //
  // 1) contains all currently-allocated objects, and
  //
  // 2) is closed under reference: no object in the closed subset
  //    references one outside the closed subset.
  //
  // Membership in a heap's closed subset is useful for assertions.
  // Clearly, the entire heap is a closed subset, so the default
  // implementation is to use "is_in_reserved".  But this may be too
  // liberal to perform useful checking.  Also, the "is_in" predicate
  // defines a closed subset, but may be too expensive, since "is_in"
  // verifies that its argument points to an object head.  The
  // "closed_subset" method allows a heap to define an intermediate
  // predicate, allowing more precise checking than "is_in_reserved" at
  // lower cost than "is_in."

  // One important case is a heap composed of disjoint contiguous spaces,
  // such as the Garbage-First collector.  Such heaps have a convenient
  // closed subset consisting of the allocated portions of those
  // contiguous spaces.

  // Return "TRUE" iff the given pointer points into the heap's defined
  // closed subset (which defaults to the entire heap).
  virtual bool is_in_closed_subset(const void* p) const {
    return is_in_reserved(p);
  }

  bool is_in_closed_subset_or_null(const void* p) const {
    return p == NULL || is_in_closed_subset(p);
  }

  // Returns "TRUE" if "p" is allocated as "permanent" data.
  // If the heap does not use "permanent" data, returns the same
  // value is_in_reserved() would return.
  // NOTE: this actually returns true if "p" is in the reserved space
  // for the permanent area, not that it is actually allocated (i.e. in
  // committed space).  If you need the more conservative answer use
  // is_permanent().
  virtual bool is_in_permanent(const void *p) const = 0;
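
  // Illustrative sketch (not part of the original header): these predicates
  // are intended for assertion checking, e.g.
  //
  //   assert(Universe::heap()->is_in_reserved(p), "pointer outside heap");
  //
  // Note the asymmetry spelled out above: is_permanent(p) (below) answers
  // for the committed perm space, so it implies is_in_permanent(p), but
  // not vice versa.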
  // Returns "TRUE" if "p" is in the committed area of "permanent" data.
  // If the heap does not use "permanent" data, returns the same
  // value is_in() would return.
  virtual bool is_permanent(const void *p) const = 0;

  bool is_in_permanent_or_null(const void *p) const {
    return p == NULL || is_in_permanent(p);
  }

  // Returns "TRUE" if "p" is a method oop in the
  // current heap, with high probability. This predicate
  // is not stable, in general.
  bool is_valid_method(oop p) const;

  void set_gc_cause(GCCause::Cause v) {
    if (UsePerfData) {
      _gc_lastcause = _gc_cause;
      _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
      _perf_gc_cause->set_value(GCCause::to_string(v));
    }
    _gc_cause = v;
  }
  GCCause::Cause gc_cause() { return _gc_cause; }

  // Preload classes into the shared portion of the heap, and then dump
  // that data to a file so that it can be loaded directly by another
  // VM (then terminate).
  virtual void preload_and_dump(TRAPS) { ShouldNotReachHere(); }

  // General obj/array allocation facilities.
  inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
  inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
  inline static oop large_typearray_allocate(KlassHandle klass, int size, int length, TRAPS);

  // Special obj/array allocation facilities.
  // Some heaps may want to manage "permanent" data uniquely.  These default
  // to the general routines if the heap does not support such handling.
  inline static oop permanent_obj_allocate(KlassHandle klass, int size, TRAPS);
  // permanent_obj_allocate_no_klass_install() does not do the installation of
  // the klass pointer in the newly created object (as permanent_obj_allocate()
  // above does).  This allows for a delay in the installation of the klass
  // pointer that is needed during the creation of klassKlass's.  The
  // method post_allocation_install_obj_klass() is used to install the
  // klass pointer.
  inline static oop permanent_obj_allocate_no_klass_install(KlassHandle klass,
                                                            int size,
                                                            TRAPS);
  inline static void post_allocation_install_obj_klass(KlassHandle klass,
                                                       oop obj,
                                                       int size);
  inline static oop permanent_array_allocate(KlassHandle klass, int size, int length, TRAPS);

  // Raw memory allocation facilities
  // The obj and array allocate methods are covers for these methods.
  // The permanent allocation method should default to mem_allocate if
  // permanent memory isn't supported.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool is_noref,
                                 bool is_tlab,
                                 bool* gc_overhead_limit_was_exceeded) = 0;
  virtual HeapWord* permanent_mem_allocate(size_t size) = 0;

  // The boundary between a "large" and "small" array of primitives, in words.
  virtual size_t large_typearray_limit() = 0;
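
  // Illustrative sketch (assumption, simplified): the object/array covers
  // above bottom out in mem_allocate(); a caller at this level looks
  // roughly like
  //
  //   bool gc_overhead_limit_was_exceeded;
  //   HeapWord* mem = heap->mem_allocate(size, /*is_noref*/ false,
  //                                      /*is_tlab*/ false,
  //                                      &gc_overhead_limit_was_exceeded);
  //   if (mem == NULL) { /* allocation failed, even after any GC attempt */ }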
  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects.  fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    return size_t(align_object_size(oopDesc::header_size()));
  }

  static void fill_with_objects(HeapWord* start, size_t words);

  static void fill_with_object(HeapWord* start, size_t words);
  static void fill_with_object(MemRegion region) {
    fill_with_object(region.start(), region.word_size());
  }
  static void fill_with_object(HeapWord* start, HeapWord* end) {
    fill_with_object(start, pointer_delta(end, start));
  }

  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation.  (Default is "no".)
  virtual bool supports_inline_contig_alloc() const {
    return false;
  }
  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area.  (These fields should be
  // physically near to one another.)
  virtual HeapWord** top_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
  virtual HeapWord** end_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }

  // Some heaps may be in an unparsable state at certain times between
  // collections.  This may be necessary for efficient implementation of
  // certain allocation-related activities.  Calling this function before
  // attempting to parse a heap ensures that the heap is in a parsable
  // state (provided other concurrent activity does not introduce
  // unparsability).  It is normally expected, therefore, that this
  // method is invoked with the world stopped.
  // NOTE: if you override this method, make sure you call
  // super::ensure_parsability so that the non-generational
  // part of the work gets done.  See implementation of
  // CollectedHeap::ensure_parsability and, for instance,
  // that of GenCollectedHeap::ensure_parsability().
  // The argument "retire_tlabs" controls whether existing TLABs
  // are merely filled or also retired, thus preventing further
  // allocation from them and necessitating allocation of new TLABs.
  virtual void ensure_parsability(bool retire_tlabs);

  // Return an estimate of the maximum allocation that could be performed
  // without triggering any collection or expansion activity.  In a
  // generational collector, for example, this is probably the largest
  // allocation that could be supported (without expansion) in the youngest
  // generation.  It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc() = 0;

  // Section on thread-local allocation buffers (TLABs)
  // If the heap supports thread-local allocation buffers, it should override
  // the following methods:
  // Returns "true" iff the heap supports thread-local allocation buffers.
  // The default is "no".
  virtual bool supports_tlab_allocation() const {
    return false;
  }
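  // Illustrative sketch (not part of the original header): the filler
  // utilities above are what keep the heap parsable when a TLAB is filled
  // or retired; the unused tail gets plugged with a dead object, e.g.
  //
  //   MemRegion unused(tlab_top, tlab_end);  // hypothetical TLAB bounds
  //   CollectedHeap::fill_with_object(unused);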
  // The amount of space available for thread-local allocation buffers.
  virtual size_t tlab_capacity(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }
  // An estimate of the maximum allocation that could be performed
  // for thread-local allocation buffers without triggering any
  // collection or expansion activity.
  virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }
  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const = 0;

  // If a compiler is eliding store barriers for TLAB-allocated objects,
  // there is probably a corresponding slow path which can produce
  // an object allocated anywhere.  The compiler's runtime support
  // promises to call this function on such a slow-path-allocated
  // object before performing initializations that have elided
  // store barriers.  Returns new_obj, or maybe a safer copy thereof.
  virtual oop new_store_barrier(oop new_obj);

  // Can a compiler elide a store barrier when it writes
  // a permanent oop into the heap?  Applies when the compiler
  // is storing x to the heap, where x->is_perm() is true.
  virtual bool can_elide_permanent_oop_store_barriers() const = 0;

  // Does this heap support heap inspection (+PrintClassHistogram)?
  virtual bool supports_heap_inspection() const = 0;

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc".  This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause) = 0;

  // This interface assumes that it's being called by the
  // vm thread.  It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause) = 0;

  // Returns the barrier set for this heap
  BarrierSet* barrier_set() { return _barrier_set; }

  // Returns "true" iff there is a stop-world GC in progress.  (I assume
  // that it should answer "false" for the concurrent part of a concurrent
  // collector -- dld).
  bool is_gc_active() const { return _is_gc_active; }

  // Total number of GC collections (started)
  unsigned int total_collections() const { return _total_collections; }
  unsigned int total_full_collections() const { return _total_full_collections; }

  // Increment total number of GC collections (started)
  // Should be protected but used by PSMarkSweep - cleanup for 1.4.2
  void increment_total_collections(bool full = false) {
    _total_collections++;
    if (full) {
      increment_total_full_collections();
    }
  }

  void increment_total_full_collections() { _total_full_collections++; }

  // Return the AdaptiveSizePolicy for the heap.
  virtual AdaptiveSizePolicy* size_policy() = 0;

  // Iterate over all the ref-containing fields of all objects, calling
  // "cl.do_oop" on each.  This includes objects in permanent memory.
  virtual void oop_iterate(OopClosure* cl) = 0;
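
  // Illustrative sketch (assumption; OopClosure's exact interface may
  // differ, and a do_oop(narrowOop*) overload may also be required): a
  // trivial closure that counts ref-containing fields via oop_iterate():
  //
  //   class CountOopClosure : public OopClosure {
  //     size_t _count;
  //    public:
  //     CountOopClosure() : _count(0) {}
  //     void do_oop(oop* p) { _count++; }
  //     size_t count() const { return _count; }
  //   };
  //
  //   CountOopClosure cl;
  //   Universe::heap()->oop_iterate(&cl);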
  // Iterate over all objects, calling "cl.do_object" on each.
  // This includes objects in permanent memory.
  virtual void object_iterate(ObjectClosure* cl) = 0;

  // Behaves the same as oop_iterate, except only traverses
  // interior pointers contained in permanent memory.  If there
  // is no permanent memory, does nothing.
  virtual void permanent_oop_iterate(OopClosure* cl) = 0;

  // Behaves the same as object_iterate, except only traverses
  // objects contained in permanent memory.  If there is no
  // permanent memory, does nothing.
  virtual void permanent_object_iterate(ObjectClosure* cl) = 0;

  // NOTE!  There is no requirement that a collector implement these
  // functions.
  //
  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block.  The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block.  (Blocks may be of different sizes.)  Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "object" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const = 0;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;

  // Returns the longest time (in ms) that has elapsed since the last
  // time that any part of the heap was examined by a garbage collection.
  virtual jlong millis_since_last_gc() = 0;

  // Perform any cleanup actions necessary before allowing a verification.
  virtual void prepare_for_verify() = 0;

  virtual void print() const = 0;
  virtual void print_on(outputStream* st) const = 0;

  // Print all GC threads (other than the VM thread)
  // used by this heap.
  virtual void print_gc_threads_on(outputStream* st) const = 0;
  void print_gc_threads() { print_gc_threads_on(tty); }
  // Iterator for all GC threads (other than VM thread)
  virtual void gc_threads_do(ThreadClosure* tc) const = 0;

  // Print any relevant tracing info that flags imply.
  // Default implementation does nothing.
  virtual void print_tracing_info() const = 0;

  // Heap verification
  virtual void verify(bool allow_dirty, bool silent) = 0;

  // Non product verification and debugging.
#ifndef PRODUCT
  // Support for PromotionFailureALot.  Return true if it's time to cause a
  // promotion failure.  The no-argument version uses
  // this->_promotion_failure_alot_count as the counter.
  inline bool promotion_should_fail(volatile size_t* count);
  inline bool promotion_should_fail();
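
  // Illustrative sketch (assumption): a collector's copying path can use
  // this to inject failures under the PromotionFailureALot flag, e.g.
  //
  //   if (heap->promotion_should_fail()) {
  //     return NULL;  // simulate a failed promotion to exercise recovery code
  //   }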
  // Reset the PromotionFailureALot counters.  Should be called at the end of
  // a GC in which promotion failure occurred.
  inline void reset_promotion_should_fail(volatile size_t* count);
  inline void reset_promotion_should_fail();
#endif  // #ifndef PRODUCT

#ifdef ASSERT
  static int fired_fake_oom() {
    return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
  }
#endif
};

// Class to set and reset the GC cause for a CollectedHeap.

class GCCauseSetter : StackObj {
  CollectedHeap* _heap;
  GCCause::Cause _previous_cause;
 public:
  GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) {
    assert(SafepointSynchronize::is_at_safepoint(),
           "This method manipulates heap state without locking");
    _heap = heap;
    _previous_cause = _heap->gc_cause();
    _heap->set_gc_cause(cause);
  }

  ~GCCauseSetter() {
    assert(SafepointSynchronize::is_at_safepoint(),
           "This method manipulates heap state without locking");
    _heap->set_gc_cause(_previous_cause);
  }
};
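
// Illustrative usage sketch (not part of the original header; VM_MyCollect
// is a hypothetical VM operation): the setter installs the cause for the
// duration of a scope and the destructor restores the previous one.
//
//   void VM_MyCollect::doit() {
//     CollectedHeap* heap = Universe::heap();
//     GCCauseSetter gcs(heap, GCCause::_java_lang_system_gc);
//     heap->collect_as_vm_thread(GCCause::_java_lang_system_gc);
//   }  // previous cause restored on scope exit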