--- genCollectedHeap.hpp
+++ genCollectedHeap.hpp
@@ -144,44 +144,44 @@
 
   virtual void check_gen_kinds() = 0;
 
  public:
 
   // Returns JNI_OK on success
   virtual jint initialize();
 
   // Performs operations required after initialization has completed.
   void post_initialize();
 
   Generation* young_gen() const { return _young_gen; }
   Generation* old_gen() const { return _old_gen; }
 
   bool is_young_gen(const Generation* gen) const { return gen == _young_gen; }
   bool is_old_gen(const Generation* gen) const { return gen == _old_gen; }
 
   // The generational collector policy.
   GenCollectorPolicy* gen_policy() const { return _gen_policy; }
 
-  CollectorPolicy* collector_policy() const { return gen_policy(); }
+  virtual CollectorPolicy* collector_policy() const { return gen_policy(); }
 
   // Adaptive size policy
-  AdaptiveSizePolicy* size_policy() {
+  virtual AdaptiveSizePolicy* size_policy() {
     return gen_policy()->size_policy();
   }
 
   // Return the (conservative) maximum heap alignment
   static size_t conservative_max_heap_alignment() {
     return Generation::GenGrain;
   }
 
   size_t capacity() const;
   size_t used() const;
 
   // Save the "used_region" for both generations.
   void save_used_regions();
 
   size_t max_capacity() const;
 
   HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
 
   // We may support a shared contiguous allocation area, if the youngest
   // generation does.
@@ -200,23 +200,23 @@
   // Perform a full collection of generations up to and including max_generation.
   // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
   void collect(GCCause::Cause cause, GenerationType max_generation);
 
   // Returns "TRUE" iff "p" points into the committed areas of the heap.
   // The methods is_in(), is_in_closed_subset() and is_in_youngest() may
   // be expensive to compute in general, so, to prevent
   // their inadvertent use in product JVMs, we restrict their use to
   // assertion checking or verification only.
   bool is_in(const void* p) const;
 
   // Returns true if the reference is to an object in the reserved space
   // for the young generation.
   // Assumes that the young gen address range is below that of the old gen.
   bool is_in_young(oop p);
 
 #ifdef ASSERT
   bool is_in_partial_collection(const void* p);
 #endif
 
-  bool is_scavengable(oop obj) {
+  virtual bool is_scavengable(oop obj) {
     return is_in_young(obj);
   }
@@ -224,38 +224,38 @@
   // Optimized nmethod scanning support routines
   virtual void register_nmethod(nmethod* nm);
   virtual void verify_nmethod(nmethod* nmethod);
 
   // Iteration functions.
   void oop_iterate_no_header(OopClosure* cl);
   void oop_iterate(ExtendedOopClosure* cl);
   void object_iterate(ObjectClosure* cl);
   void safe_object_iterate(ObjectClosure* cl);
   Space* space_containing(const void* addr) const;
 
   // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
   // each address in the (reserved) heap is a member of exactly
   // one block. The defining characteristic of a block is that it is
   // possible to find its size, and thus to progress forward to the next
   // block. (Blocks may be of different sizes.) Thus, blocks may
   // represent Java objects, or they might be free blocks in a
   // free-list-based heap (or subheap), as long as the two kinds are
   // distinguishable and the size of each is determinable.
 
   // Returns the address of the start of the "block" that contains the
   // address "addr". We say "block" instead of "object" since some heaps
   // may not pack objects densely; a chunk may either be an object or a
   // non-object.
-  HeapWord* block_start(const void* addr) const;
+  virtual HeapWord* block_start(const void* addr) const;
 
   // Requires "addr" to be the start of a chunk, and returns its size.
   // "addr + size" is required to be the start of a new chunk, or the end
   // of the active area of the heap. Assumes (and verifies in non-product
   // builds) that addr is in the allocated part of the heap and is
   // the start of a chunk.
-  size_t block_size(const HeapWord* addr) const;
+  virtual size_t block_size(const HeapWord* addr) const;
 
   // Requires "addr" to be the start of a block, and returns "TRUE" iff
   // the block is an object. Assumes (and verifies in non-product
   // builds) that addr is in the allocated part of the heap and is
   // the start of a chunk.
-  bool block_is_obj(const HeapWord* addr) const;
+  virtual bool block_is_obj(const HeapWord* addr) const;
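The block contract above is self-contained enough to exercise directly. Below is a hedged sketch that relies only on block_start(), block_size() and block_is_obj() as declared here; the helper name count_object_blocks is invented, and the walk assumes the range lies in the allocated part of the heap and has been made parsable (see ensure_parsability() further down).

// Hypothetical helper, for illustration only: count how many blocks in
// [bottom, top) are Java objects, using nothing but the block contract.
static size_t count_object_blocks(GenCollectedHeap* gch,
                                  HeapWord* bottom, HeapWord* top) {
  size_t objects = 0;
  // Normalize to a block boundary in case "bottom" points into a block.
  HeapWord* p = gch->block_start(bottom);
  while (p < top) {
    if (gch->block_is_obj(p)) {
      objects++;
    }
    p += gch->block_size(p);  // the next block starts immediately after
  }
  return objects;
}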
@@ -263,22 +263,22 @@
   // Section on TLABs.
-  bool supports_tlab_allocation() const;
-  size_t tlab_capacity(Thread* thr) const;
-  size_t tlab_used(Thread* thr) const;
-  size_t unsafe_max_tlab_alloc(Thread* thr) const;
-  HeapWord* allocate_new_tlab(size_t size);
+  virtual bool supports_tlab_allocation() const;
+  virtual size_t tlab_capacity(Thread* thr) const;
+  virtual size_t tlab_used(Thread* thr) const;
+  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
+  virtual HeapWord* allocate_new_tlab(size_t size);
 
   // Can a compiler initialize a new object without store barriers?
   // This permission only extends from the creation of a new object
   // via a TLAB up to the first subsequent safepoint.
-  bool can_elide_tlab_store_barriers() const {
+  virtual bool can_elide_tlab_store_barriers() const {
     return true;
   }
 
   // We don't need barriers for stores to objects in the
   // young gen and, a fortiori, for initializing stores to
   // objects therein. This applies to DefNew+Tenured and ParNew+CMS
   // only and may need to be re-examined if other
   // kinds of collectors are implemented in the future.
-  bool can_elide_initializing_store_barrier(oop new_obj) {
+  virtual bool can_elide_initializing_store_barrier(oop new_obj) {
     return is_in_young(new_obj);
   }
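These virtual additions matter because check_gen_kinds() is pure, so GenCollectedHeap is only ever instantiated through a subclass, and that subclass can now refine the block, TLAB, and barrier queries. A minimal hypothetical subclass, with an invented name and an arbitrary choice of overrides:

// Invented subclass, for illustration only.
class MyGenHeap : public GenCollectedHeap {
 protected:
  // Pure in GenCollectedHeap: checks that the generations have the
  // concrete kinds this heap expects.
  virtual void check_gen_kinds();

 public:
  // Overridable as of this change: a collector could classify
  // scavengable objects differently than "is in the young gen".
  virtual bool is_scavengable(oop obj);

  // Overridable as of this change: a concurrent collector may account
  // for concurrent cycles when reporting the last collection time.
  virtual jlong millis_since_last_gc();
};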
@@ -286,56 +286,56 @@
   // The "requestor" generation is performing some garbage collection
   // action for which it would be useful to have scratch space. The
   // requestor promises to allocate no more than "max_alloc_words" in any
   // older generation (via promotion, say). Any blocks of space that can
   // be provided are returned as a list of ScratchBlocks, sorted by
   // decreasing size.
   ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
   // Allow each generation to reset any scratch space that it has
   // contributed as it needs.
   void release_scratch();
 
   // Ensure parsability: override
-  void ensure_parsability(bool retire_tlabs);
+  virtual void ensure_parsability(bool retire_tlabs);
 
   // Time in ms since the oldest "time of last gc" recorded by
   // any generation.
-  jlong millis_since_last_gc();
+  virtual jlong millis_since_last_gc();
 
   // Total number of full collections completed.
   unsigned int total_full_collections_completed() {
     assert(_full_collections_completed <= _total_full_collections,
            "Can't complete more collections than were started");
     return _full_collections_completed;
   }
 
   // Update the above counter, as appropriate, at the end of a stop-the-world GC cycle
   unsigned int update_full_collections_completed();
   // Update the above counter, as appropriate, at the end of a concurrent GC cycle
   unsigned int update_full_collections_completed(unsigned int count);
 
   // Update "time of last gc" for all generations to "now".
   void update_time_of_last_gc(jlong now) {
     _young_gen->update_time_of_last_gc(now);
     _old_gen->update_time_of_last_gc(now);
   }
 
   // Update the gc statistics for each generation.
   void update_gc_stats(Generation* current_generation, bool full) {
     _old_gen->update_gc_stats(current_generation, full);
   }
 
   bool no_gc_in_progress() { return !is_gc_active(); }
 
   // Override.
   void prepare_for_verify();
 
   // Override.
   void verify(VerifyOption option);
 
   // Override.
-  void print_on(outputStream* st) const;
+  virtual void print_on(outputStream* st) const;
   virtual void print_gc_threads_on(outputStream* st) const;
   virtual void gc_threads_do(ThreadClosure* tc) const;
-  void print_tracing_info() const;
+  virtual void print_tracing_info() const;
 
   void print_heap_change(size_t young_prev_used, size_t old_prev_used) const;
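For concreteness, since update_time_of_last_gc() stamps both generations, one plausible shape for the matching query follows. This is a sketch only: Generation::time_of_last_gc() is an assumed accessor, and the real implementation may well differ.

// Sketch under stated assumptions; not the actual implementation.
// os::javaTimeMillis() and MIN2 are standard HotSpot utilities.
jlong GenCollectedHeap::millis_since_last_gc() {
  // The oldest stamp is the minimum: the generation collected least
  // recently determines the answer.
  jlong last = MIN2(_young_gen->time_of_last_gc(),   // assumed accessor
                    _old_gen->time_of_last_gc());
  return os::javaTimeMillis() - last;
}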
@@ -343,17 +343,17 @@
   // The functions below are helper functions that a subclass of
   // "CollectedHeap" can use in the implementation of its virtual
   // functions.
 
   class GenClosure : public StackObj {
   public:
     virtual void do_generation(Generation* gen) = 0;
   };
 
   // Apply "cl.do_generation" to all generations in the heap;
   // "old_to_young" determines the order.
   void generation_iterate(GenClosure* cl, bool old_to_young);
 
   // Return "true" if all generations have reached the
   // maximal committed limit that they can reach without a garbage
   // collection.
-  bool is_maximal_no_gc() const;
+  virtual bool is_maximal_no_gc() const;
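A usage sketch for the helper class above; the closure is invented for illustration and assumes a per-generation Generation::capacity() query, by analogy with the heap-level capacity() declared earlier in this file.

// Invented closure: sums capacity over all generations, old to young.
class SumCapacityClosure : public GenCollectedHeap::GenClosure {
  size_t _total;
 public:
  SumCapacityClosure() : _total(0) {}
  virtual void do_generation(Generation* gen) {
    _total += gen->capacity();
  }
  size_t total() const { return _total; }
};

// Usage, via the GenCollectedHeap::heap() accessor declared just below:
//   SumCapacityClosure blk;
//   GenCollectedHeap::heap()->generation_iterate(&blk, true /* old_to_young */);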
@@ -361,19 +361,19 @@
   // This function returns the CardTableRS object that allows us to scan
   // generations in a fully generational heap.
   CardTableRS* rem_set() { return _rem_set; }
 
   // Convenience function to be used in situations where the heap type can be
   // asserted to be this type.
   static GenCollectedHeap* heap();
 
   // The ScanningOption determines which of the roots
   // the closure is applied to:
   // "SO_None" does none.
   enum ScanningOption {
     SO_None                =  0x0,
     SO_AllCodeCache        =  0x8,
     SO_ScavengeCodeCache   = 0x10
   };
 
  protected:
   void process_roots(StrongRootsScope* scope,
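The ScanningOption constants are distinct bits, so they compose as a mask. A short hypothetical sketch follows; the helper so_contains is invented, and the pairing of options with scan types is inferred from the names only.

// Invented helper: ScanningOption values are bit flags, tested by masking.
static bool so_contains(int options, GenCollectedHeap::ScanningOption so) {
  return (options & so) != 0;
}

// E.g. a young-generation scan would presumably pass SO_ScavengeCodeCache
// (walk only nmethods that may hold scavengable oops), while a full
// collection would pass SO_AllCodeCache:
//   so_contains(GenCollectedHeap::SO_ScavengeCodeCache,
//               GenCollectedHeap::SO_AllCodeCache)   // -> false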