1 #ifdef USE_PRAGMA_IDENT_HDR
2 #pragma ident "@(#)generation.hpp 1.195 07/05/17 15:55:02 JVM"
3 #endif
4 /*
5 * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7 *
8 * This code is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 only, as
10 * published by the Free Software Foundation.
11 *
12 * This code is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 * version 2 for more details (a copy is included in the LICENSE file that
16 * accompanied this code).
17 *
18 * You should have received a copy of the GNU General Public License version
19 * 2 along with this work; if not, write to the Free Software Foundation,
20 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
21 *
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
24 * have any questions.
25 *
281 virtual HeapWord** end_addr() const { return NULL; }
282
283 // Thread-local allocation buffers. By default a generation does not
// support TLABs: supports_tlab_allocation() answers false, and the
// capacity queries below fail with guarantee() if called anyway.
284 virtual bool supports_tlab_allocation() const { return false; }
285 virtual size_t tlab_capacity() const {
// Reaching here means a caller ignored supports_tlab_allocation().
286 guarantee(false, "Generation doesn't support thread local allocation buffers");
287 return 0;
288 }
289 virtual size_t unsafe_max_tlab_alloc() const {
290 guarantee(false, "Generation doesn't support thread local allocation buffers");
291 return 0;
292 }
293
294 // "obj" is the address of an object in a younger generation. Allocate space
295 // for "obj" in the current (or some higher) generation, and copy "obj" into
296 // the newly allocated space, if possible, returning the result (or NULL if
297 // the allocation failed).
298 //
299 // The "obj_size" argument is just obj->size(), passed along so the caller can
300 // avoid repeating the virtual call to retrieve it.
301 //
302 // The "ref" argument, if non-NULL, is the address of some reference to "obj"
303 // (that is "*ref == obj"); some generations may use this information to, for
304 // example, influence placement decisions.
305 //
306 // The default implementation ignores "ref" and calls allocate().
307 virtual oop promote(oop obj, size_t obj_size, oop* ref);
308
309 // Thread "thread_num" (0 <= thread_num < ParallelGCThreads) wants to promote
310 // object "obj", whose original mark word was "m", and whose size is
311 // "word_sz". If possible, allocate space for "obj", copy obj into it
312 // (taking care to copy "m" into the mark word when done, since the mark
313 // word of "obj" may have been overwritten with a forwarding pointer, and
314 // also taking care to copy the klass pointer *last*). Returns the new
315 // object if successful, or else NULL.
316 virtual oop par_promote(int thread_num,
317 oop obj, markOop m, size_t word_sz);
318
319 // Undo, if possible, the most recent par_promote_alloc allocation by
320 // "thread_num" ("obj", of "word_sz").
321 virtual void par_promote_alloc_undo(int thread_num,
322 HeapWord* obj, size_t word_sz);
323
324 // Informs the current generation that all par_promote_alloc's in the
325 // collection have been completed; any supporting data structures can be
326 // reset. Default is to do nothing.
327 virtual void par_promote_alloc_done(int thread_num) {}
368 size_t word_size,
369 bool is_tlab) = 0;
370
371 // Perform a heap collection, attempting to create (at least) enough
372 // space to support an allocation of the given "word_size". If
373 // successful, perform the allocation and return the resulting
374 // "oop" (initializing the allocated block). If the allocation is
375 // still unsuccessful, return "NULL".
376 virtual HeapWord* expand_and_allocate(size_t word_size,
377 bool is_tlab,
378 bool parallel = false) = 0;
379
380 // Some generations may require some cleanup or preparation actions before
381 // allowing a collection. The default is to do nothing.
// Note: stray semicolons after the empty bodies removed ("{};" -> "{}");
// they were harmless but trigger -Wextra-semi style diagnostics.
382 virtual void gc_prologue(bool full) {}
383
384 // Some generations may require some cleanup actions after a collection.
385 // The default is to do nothing.
386 virtual void gc_epilogue(bool full) {}
387
388 // Some generations may need to be "fixed-up" after some allocation
389 // activity to make them parsable again. The default is to do nothing.
390 virtual void ensure_parsability() {}
391
392 // Time (in ms) when we were last collected or now if a collection is
393 // in progress.
394 virtual jlong time_of_last_gc(jlong now) {
395 // XXX See note in genCollectedHeap::millis_since_last_gc()
396 NOT_PRODUCT(
397 if (now < _time_of_last_gc) {
// "now" and "_time_of_last_gc" are jlong (64-bit); "%d" consumes only
// 32 bits and prints garbage on LP64 platforms. Use the 64-bit format
// macro from globalDefinitions instead.
398 warning("time warp: " INT64_FORMAT " to " INT64_FORMAT, _time_of_last_gc, now);
399 }
400 )
401 return _time_of_last_gc;
402 }
403
// Record "now" (in ms) as the time of the most recent collection.
404 virtual void update_time_of_last_gc(jlong now) {
405 _time_of_last_gc = now;
406 }
407
// Declares one oop_since_save_marks_iterate##nv_suffix overload per
// specialized closure type (expanded via SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES).
// Each default implementation simply forwards to the generic
// OopsInGenClosure variant; subclasses presumably override the
// specialized forms for speed -- confirm against concrete generations.
468 #define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
469 virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
470 oop_since_save_marks_iterate_v((OopsInGenClosure*)cl); \
471 }
472 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(Generation_SINCE_SAVE_MARKS_DECL)
473
474 #undef Generation_SINCE_SAVE_MARKS_DECL
475
476 // The "requestor" generation is performing some garbage collection
477 // action for which it would be useful to have scratch space. If
478 // the target is not the requestor, no gc actions will be required
479 // of the target. The requestor promises to allocate no more than
480 // "max_alloc_words" in the target generation (via promotion say,
481 // if the requestor is a young generation and the target is older).
482 // If the target generation can provide any scratch space, it adds
483 // it to "list", leaving "list" pointing to the head of the
484 // augmented list. The default is to offer no space.
485 virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
486 size_t max_alloc_words) {}
487
488 // When an older generation has been collected, and perhaps resized,
489 // this method will be invoked on all younger generations (from older to
490 // younger), allowing them to resize themselves as appropriate.
491 virtual void compute_new_size() = 0;
492
493 // Printing
494 virtual const char* name() const = 0;
495 virtual const char* short_name() const = 0;
496
// Index of this generation in the heap's generation hierarchy
// (presumably 0 == youngest; confirm in genCollectedHeap).
497 int level() const { return _level; }
498
499 // Attributes
500
501 // True iff the given generation may only be the youngest generation.
502 virtual bool must_be_youngest() const = 0;
503 // True iff the given generation may only be the oldest generation.
504 virtual bool must_be_oldest() const = 0;
505
506 // Reference Processing accessor
// NOTE(review): the top-level "const" on the returned pointer has no
// effect for callers; it could be dropped without changing behavior.
507 ReferenceProcessor* const ref_processor() { return _ref_processor; }
591 // Class CardGeneration is a generation that is covered by a card table,
592 // and uses a card-size block-offset array to implement block_start.
593
594 // class BlockOffsetArray;
595 // class BlockOffsetArrayContigSpace;
596 class BlockOffsetSharedArray;
597
598 class CardGeneration: public Generation {
599 friend class VMStructs;
600 protected:
601 // This is shared with other generations.
602 GenRemSet* _rs;
603 // This is local to this generation.
604 BlockOffsetSharedArray* _bts;
605
606 CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
607 GenRemSet* remset);
608
609 public:
610
// Clear the portion of the remembered set covering this generation.
611 virtual void clear_remembered_set();
612
// Invalidate the remembered set for this generation (presumably dirtying
// the covered cards so they are rescanned -- confirm in genRemSet).
613 virtual void invalidate_remembered_set();
614
615 virtual void prepare_for_verify();
616 };
617
618 // OneContigSpaceCardGeneration models a heap of old objects contained in a single
619 // contiguous space.
620 //
621 // Garbage collection is performed using mark-compact.
622
623 class OneContigSpaceCardGeneration: public CardGeneration {
624 friend class VMStructs;
625 // Abstractly, this is a subtype that gets access to protected fields.
626 friend class CompactingPermGen;
627 friend class VM_PopulateDumpSharedSpace;
628
629 protected:
630 size_t _min_heap_delta_bytes; // Minimum amount to expand.
631 ContiguousSpace* _the_space; // actual space holding objects
632 WaterMark _last_gc; // watermark between objects allocated before
633 // and after last GC.
634
635 // Grow generation with specified size (returns false if unable to grow)
636 bool grow_by(size_t bytes);
637 // Grow generation to reserved size.
638 bool grow_to_reserved();
639 // Shrink generation by the specified number of bytes (returns void;
// failure to shrink is not reported to the caller).
640 void shrink_by(size_t bytes);
641
642 // Allocation failure
// "bytes" is the required amount; "expand_bytes" is presumably the
// preferred (larger) expansion -- confirm against the .cpp implementation.
643 void expand(size_t bytes, size_t expand_bytes);
644 void shrink(size_t bytes);
645
646 // Accessing spaces
647 ContiguousSpace* the_space() const { return _the_space; }
648
649 public:
// Note: _last_gc is left default-constructed; it is updated when a
// collection occurs.
650 OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size,
651 size_t min_heap_delta_bytes,
652 int level, GenRemSet* remset,
653 ContiguousSpace* space) :
654 CardGeneration(rs, initial_byte_size, level, remset),
655 _the_space(space), _min_heap_delta_bytes(min_heap_delta_bytes)
656 {}
657
658 inline bool is_in(const void* p) const;
659
660 // Space enquiries
661 size_t capacity() const;
662 size_t used() const;
663 size_t free() const;
691 void save_marks();
692 void reset_saved_marks();
693 bool no_allocs_since_save_marks();
694
695 inline size_t block_size(const HeapWord* addr) const;
696
697 inline bool block_is_obj(const HeapWord* addr) const;
698
699 virtual void collect(bool full,
700 bool clear_all_soft_refs,
701 size_t size,
702 bool is_tlab);
703 HeapWord* expand_and_allocate(size_t size,
704 bool is_tlab,
705 bool parallel = false);
706
707 virtual void prepare_for_verify();
708
709 virtual void gc_epilogue(bool full);
710
711 virtual void verify(bool allow_dirty);
712 virtual void print_on(outputStream* st) const;
713 };
|
1 #ifdef USE_PRAGMA_IDENT_HDR
2 #pragma ident "@(#)generation.hpp 1.195 07/05/17 15:55:02 JVM"
3 #endif
4 /*
5 * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7 *
8 * This code is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 only, as
10 * published by the Free Software Foundation.
11 *
12 * This code is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 * version 2 for more details (a copy is included in the LICENSE file that
16 * accompanied this code).
17 *
18 * You should have received a copy of the GNU General Public License version
19 * 2 along with this work; if not, write to the Free Software Foundation,
20 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
21 *
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
24 * have any questions.
25 *
281 virtual HeapWord** end_addr() const { return NULL; }
282
283 // Thread-local allocation buffers. By default a generation does not
// support TLABs: supports_tlab_allocation() answers false, and the
// capacity queries below fail with guarantee() if called anyway.
284 virtual bool supports_tlab_allocation() const { return false; }
285 virtual size_t tlab_capacity() const {
// Reaching here means a caller ignored supports_tlab_allocation().
286 guarantee(false, "Generation doesn't support thread local allocation buffers");
287 return 0;
288 }
289 virtual size_t unsafe_max_tlab_alloc() const {
290 guarantee(false, "Generation doesn't support thread local allocation buffers");
291 return 0;
292 }
293
294 // "obj" is the address of an object in a younger generation. Allocate space
295 // for "obj" in the current (or some higher) generation, and copy "obj" into
296 // the newly allocated space, if possible, returning the result (or NULL if
297 // the allocation failed).
298 //
299 // The "obj_size" argument is just obj->size(), passed along so the caller can
300 // avoid repeating the virtual call to retrieve it.
301 virtual oop promote(oop obj, size_t obj_size);
302
303 // Thread "thread_num" (0 <= thread_num < ParallelGCThreads) wants to promote
304 // object "obj", whose original mark word was "m", and whose size is
305 // "word_sz". If possible, allocate space for "obj", copy obj into it
306 // (taking care to copy "m" into the mark word when done, since the mark
307 // word of "obj" may have been overwritten with a forwarding pointer, and
308 // also taking care to copy the klass pointer *last*). Returns the new
309 // object if successful, or else NULL.
310 virtual oop par_promote(int thread_num,
311 oop obj, markOop m, size_t word_sz);
312
313 // Undo, if possible, the most recent par_promote_alloc allocation by
314 // "thread_num" ("obj", of "word_sz").
315 virtual void par_promote_alloc_undo(int thread_num,
316 HeapWord* obj, size_t word_sz);
317
318 // Informs the current generation that all par_promote_alloc's in the
319 // collection have been completed; any supporting data structures can be
320 // reset. Default is to do nothing.
321 virtual void par_promote_alloc_done(int thread_num) {}
362 size_t word_size,
363 bool is_tlab) = 0;
364
365 // Perform a heap collection, attempting to create (at least) enough
366 // space to support an allocation of the given "word_size". If
367 // successful, perform the allocation and return the resulting
368 // "oop" (initializing the allocated block). If the allocation is
369 // still unsuccessful, return "NULL".
370 virtual HeapWord* expand_and_allocate(size_t word_size,
371 bool is_tlab,
372 bool parallel = false) = 0;
373
374 // Some generations may require some cleanup or preparation actions before
375 // allowing a collection. The default is to do nothing.
// Note: stray semicolons after the empty bodies removed ("{};" -> "{}");
// they were harmless but trigger -Wextra-semi style diagnostics.
376 virtual void gc_prologue(bool full) {}
377
378 // Some generations may require some cleanup actions after a collection.
379 // The default is to do nothing.
380 virtual void gc_epilogue(bool full) {}
381
382 // Save the high water marks for the used space in a generation.
383 virtual void record_spaces_top() {}
384
385 // Some generations may need to be "fixed-up" after some allocation
386 // activity to make them parsable again. The default is to do nothing.
387 virtual void ensure_parsability() {}
388
389 // Time (in ms) when we were last collected or now if a collection is
390 // in progress.
391 virtual jlong time_of_last_gc(jlong now) {
392 // XXX See note in genCollectedHeap::millis_since_last_gc()
393 NOT_PRODUCT(
394 if (now < _time_of_last_gc) {
// "now" and "_time_of_last_gc" are jlong (64-bit); "%d" consumes only
// 32 bits and prints garbage on LP64 platforms. Use the 64-bit format
// macro from globalDefinitions instead.
395 warning("time warp: " INT64_FORMAT " to " INT64_FORMAT, _time_of_last_gc, now);
396 }
397 )
398 return _time_of_last_gc;
399 }
400
// Record "now" (in ms) as the time of the most recent collection.
401 virtual void update_time_of_last_gc(jlong now) {
402 _time_of_last_gc = now;
403 }
404
// Declares one oop_since_save_marks_iterate##nv_suffix overload per
// specialized closure type (expanded via SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES).
// Each default implementation simply forwards to the generic
// OopsInGenClosure variant; subclasses presumably override the
// specialized forms for speed -- confirm against concrete generations.
465 #define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
466 virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
467 oop_since_save_marks_iterate_v((OopsInGenClosure*)cl); \
468 }
469 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(Generation_SINCE_SAVE_MARKS_DECL)
470
471 #undef Generation_SINCE_SAVE_MARKS_DECL
472
473 // The "requestor" generation is performing some garbage collection
474 // action for which it would be useful to have scratch space. If
475 // the target is not the requestor, no gc actions will be required
476 // of the target. The requestor promises to allocate no more than
477 // "max_alloc_words" in the target generation (via promotion say,
478 // if the requestor is a young generation and the target is older).
479 // If the target generation can provide any scratch space, it adds
480 // it to "list", leaving "list" pointing to the head of the
481 // augmented list. The default is to offer no space.
482 virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
483 size_t max_alloc_words) {}
484
485 // Give each generation an opportunity to do clean up for any
486 // contributed scratch.
// Note: stray semicolon after the empty body removed ("{};" -> "{}").
487 virtual void reset_scratch() {}
488
489 // When an older generation has been collected, and perhaps resized,
490 // this method will be invoked on all younger generations (from older to
491 // younger), allowing them to resize themselves as appropriate.
492 virtual void compute_new_size() = 0;
493
494 // Printing
495 virtual const char* name() const = 0;
496 virtual const char* short_name() const = 0;
497
// Index of this generation in the heap's generation hierarchy
// (presumably 0 == youngest; confirm in genCollectedHeap).
498 int level() const { return _level; }
499
500 // Attributes
501
502 // True iff the given generation may only be the youngest generation.
503 virtual bool must_be_youngest() const = 0;
504 // True iff the given generation may only be the oldest generation.
505 virtual bool must_be_oldest() const = 0;
506
507 // Reference Processing accessor
// NOTE(review): the top-level "const" on the returned pointer has no
// effect for callers; it could be dropped without changing behavior.
508 ReferenceProcessor* const ref_processor() { return _ref_processor; }
592 // Class CardGeneration is a generation that is covered by a card table,
593 // and uses a card-size block-offset array to implement block_start.
594
595 // class BlockOffsetArray;
596 // class BlockOffsetArrayContigSpace;
597 class BlockOffsetSharedArray;
598
599 class CardGeneration: public Generation {
600 friend class VMStructs;
601 protected:
602 // This is shared with other generations.
603 GenRemSet* _rs;
604 // This is local to this generation.
605 BlockOffsetSharedArray* _bts;
606
607 CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
608 GenRemSet* remset);
609
610 public:
611
612 // Attempt to expand the generation by "bytes". Expand by at a
613 // minimum "expand_bytes". Return true if some amount (not
614 // necessarily the full "bytes") was done.
615 virtual bool expand(size_t bytes, size_t expand_bytes);
616
// Clear the portion of the remembered set covering this generation.
617 virtual void clear_remembered_set();
618
// Invalidate the remembered set for this generation (presumably dirtying
// the covered cards so they are rescanned -- confirm in genRemSet).
619 virtual void invalidate_remembered_set();
620
621 virtual void prepare_for_verify();
622
623 // Grow generation with specified size (returns false if unable to grow)
624 virtual bool grow_by(size_t bytes) = 0;
625 // Grow generation to reserved size.
626 virtual bool grow_to_reserved() = 0;
627 };
628
629 // OneContigSpaceCardGeneration models a heap of old objects contained in a single
630 // contiguous space.
631 //
632 // Garbage collection is performed using mark-compact.
633
634 class OneContigSpaceCardGeneration: public CardGeneration {
635 friend class VMStructs;
636 // Abstractly, this is a subtype that gets access to protected fields.
637 friend class CompactingPermGen;
638 friend class VM_PopulateDumpSharedSpace;
639
640 protected:
641 size_t _min_heap_delta_bytes; // Minimum amount to expand.
642 ContiguousSpace* _the_space; // actual space holding objects
643 WaterMark _last_gc; // watermark between objects allocated before
644 // and after last GC.
645
646 // Grow generation with specified size (returns false if unable to grow)
647 virtual bool grow_by(size_t bytes);
648 // Grow generation to reserved size.
649 virtual bool grow_to_reserved();
650 // Shrink generation by the specified number of bytes (returns void;
// failure to shrink is not reported to the caller).
651 void shrink_by(size_t bytes);
652
653 // Allocation failure
// Overrides CardGeneration::expand; returns true if any expansion
// (not necessarily the full "bytes") succeeded.
654 virtual bool expand(size_t bytes, size_t expand_bytes);
655 void shrink(size_t bytes);
656
657 // Accessing spaces
658 ContiguousSpace* the_space() const { return _the_space; }
659
660 public:
// Note: _last_gc is left default-constructed; it is updated when a
// collection occurs.
661 OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size,
662 size_t min_heap_delta_bytes,
663 int level, GenRemSet* remset,
664 ContiguousSpace* space) :
665 CardGeneration(rs, initial_byte_size, level, remset),
666 _the_space(space), _min_heap_delta_bytes(min_heap_delta_bytes)
667 {}
668
669 inline bool is_in(const void* p) const;
670
671 // Space enquiries
672 size_t capacity() const;
673 size_t used() const;
674 size_t free() const;
702 void save_marks();
703 void reset_saved_marks();
704 bool no_allocs_since_save_marks();
705
706 inline size_t block_size(const HeapWord* addr) const;
707
708 inline bool block_is_obj(const HeapWord* addr) const;
709
710 virtual void collect(bool full,
711 bool clear_all_soft_refs,
712 size_t size,
713 bool is_tlab);
714 HeapWord* expand_and_allocate(size_t size,
715 bool is_tlab,
716 bool parallel = false);
717
718 virtual void prepare_for_verify();
719
720 virtual void gc_epilogue(bool full);
721
722 virtual void record_spaces_top();
723
724 virtual void verify(bool allow_dirty);
725 virtual void print_on(outputStream* st) const;
726 };
|