< prev index next >
src/hotspot/share/gc/shared/genCollectedHeap.hpp
Print this page
*** 159,172 ****
bool is_old_gen(const Generation* gen) const { return gen == _old_gen; }
// The generational collector policy.
GenCollectorPolicy* gen_policy() const { return _gen_policy; }
! CollectorPolicy* collector_policy() const { return gen_policy(); }
// Adaptive size policy
! AdaptiveSizePolicy* size_policy() {
return gen_policy()->size_policy();
}
// Return the (conservative) maximum heap alignment
static size_t conservative_max_heap_alignment() {
--- 159,172 ----
bool is_old_gen(const Generation* gen) const { return gen == _old_gen; }
// The generational collector policy.
GenCollectorPolicy* gen_policy() const { return _gen_policy; }
! virtual CollectorPolicy* collector_policy() const { return gen_policy(); }
// Adaptive size policy
! virtual AdaptiveSizePolicy* size_policy() {
return gen_policy()->size_policy();
}
// Return the (conservative) maximum heap alignment
static size_t conservative_max_heap_alignment() {
*** 215,225 ****
#ifdef ASSERT
bool is_in_partial_collection(const void* p);
#endif
! bool is_scavengable(oop obj) {
return is_in_young(obj);
}
// Optimized nmethod scanning support routines
virtual void register_nmethod(nmethod* nm);
--- 215,225 ----
#ifdef ASSERT
bool is_in_partial_collection(const void* p);
#endif
! virtual bool is_scavengable(oop obj) {
return is_in_young(obj);
}
// Optimized nmethod scanning support routines
virtual void register_nmethod(nmethod* nm);
*** 243,287 ****
// Returns the address of the start of the "block" that contains the
// address "addr". We say "blocks" instead of "object" since some heaps
// may not pack objects densely; a chunk may either be an object or a
// non-object.
! HeapWord* block_start(const void* addr) const;
// Requires "addr" to be the start of a chunk, and returns its size.
// "addr + size" is required to be the start of a new chunk, or the end
// of the active area of the heap. Assumes (and verifies in non-product
// builds) that addr is in the allocated part of the heap and is
// the start of a chunk.
! size_t block_size(const HeapWord* addr) const;
// Requires "addr" to be the start of a block, and returns "TRUE" iff
// the block is an object. Assumes (and verifies in non-product
// builds) that addr is in the allocated part of the heap and is
// the start of a chunk.
! bool block_is_obj(const HeapWord* addr) const;
// Section on TLAB's.
! bool supports_tlab_allocation() const;
! size_t tlab_capacity(Thread* thr) const;
! size_t tlab_used(Thread* thr) const;
! size_t unsafe_max_tlab_alloc(Thread* thr) const;
! HeapWord* allocate_new_tlab(size_t size);
// Can a compiler initialize a new object without store barriers?
// This permission only extends from the creation of a new object
// via a TLAB up to the first subsequent safepoint.
! bool can_elide_tlab_store_barriers() const {
return true;
}
// We don't need barriers for stores to objects in the
// young gen and, a fortiori, for initializing stores to
// objects therein. This applies to DefNew+Tenured and ParNew+CMS
// only and may need to be re-examined in case other
// kinds of collectors are implemented in the future.
! bool can_elide_initializing_store_barrier(oop new_obj) {
return is_in_young(new_obj);
}
// The "requestor" generation is performing some garbage collection
// action for which it would be useful to have scratch space. The
--- 243,287 ----
// Returns the address of the start of the "block" that contains the
// address "addr". We say "blocks" instead of "object" since some heaps
// may not pack objects densely; a chunk may either be an object or a
// non-object.
! virtual HeapWord* block_start(const void* addr) const;
// Requires "addr" to be the start of a chunk, and returns its size.
// "addr + size" is required to be the start of a new chunk, or the end
// of the active area of the heap. Assumes (and verifies in non-product
// builds) that addr is in the allocated part of the heap and is
// the start of a chunk.
! virtual size_t block_size(const HeapWord* addr) const;
// Requires "addr" to be the start of a block, and returns "TRUE" iff
// the block is an object. Assumes (and verifies in non-product
// builds) that addr is in the allocated part of the heap and is
// the start of a chunk.
! virtual bool block_is_obj(const HeapWord* addr) const;
// Section on TLAB's.
! virtual bool supports_tlab_allocation() const;
! virtual size_t tlab_capacity(Thread* thr) const;
! virtual size_t tlab_used(Thread* thr) const;
! virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
! virtual HeapWord* allocate_new_tlab(size_t size);
// Can a compiler initialize a new object without store barriers?
// This permission only extends from the creation of a new object
// via a TLAB up to the first subsequent safepoint.
! virtual bool can_elide_tlab_store_barriers() const {
return true;
}
// We don't need barriers for stores to objects in the
// young gen and, a fortiori, for initializing stores to
// objects therein. This applies to DefNew+Tenured and ParNew+CMS
// only and may need to be re-examined in case other
// kinds of collectors are implemented in the future.
! virtual bool can_elide_initializing_store_barrier(oop new_obj) {
return is_in_young(new_obj);
}
// The "requestor" generation is performing some garbage collection
// action for which it would be useful to have scratch space. The
*** 293,307 ****
// Allow each generation to reset any scratch space that it has
// contributed as it needs.
void release_scratch();
// Ensure parsability: override
! void ensure_parsability(bool retire_tlabs);
// Time in ms since the longest time a collector ran
// in any generation.
! jlong millis_since_last_gc();
// Total number of full collections completed.
unsigned int total_full_collections_completed() {
assert(_full_collections_completed <= _total_full_collections,
"Can't complete more collections than were started");
--- 293,307 ----
// Allow each generation to reset any scratch space that it has
// contributed as it needs.
void release_scratch();
// Ensure parsability: override
! virtual void ensure_parsability(bool retire_tlabs);
// Time in ms since the longest time a collector ran
// in any generation.
! virtual jlong millis_since_last_gc();
// Total number of full collections completed.
unsigned int total_full_collections_completed() {
assert(_full_collections_completed <= _total_full_collections,
"Can't complete more collections than were started");
*** 331,344 ****
// Override.
void verify(VerifyOption option);
// Override.
! void print_on(outputStream* st) const;
virtual void print_gc_threads_on(outputStream* st) const;
virtual void gc_threads_do(ThreadClosure* tc) const;
! void print_tracing_info() const;
void print_heap_change(size_t young_prev_used, size_t old_prev_used) const;
// The functions below are helper functions that a subclass of
// "CollectedHeap" can use in the implementation of its virtual
--- 331,344 ----
// Override.
void verify(VerifyOption option);
// Override.
! virtual void print_on(outputStream* st) const;
virtual void print_gc_threads_on(outputStream* st) const;
virtual void gc_threads_do(ThreadClosure* tc) const;
! virtual void print_tracing_info() const;
void print_heap_change(size_t young_prev_used, size_t old_prev_used) const;
// The functions below are helper functions that a subclass of
// "CollectedHeap" can use in the implementation of its virtual
*** 354,364 ****
void generation_iterate(GenClosure* cl, bool old_to_young);
// Return "true" if all generations have reached the
// maximal committed limit that they can reach, without a garbage
// collection.
! bool is_maximal_no_gc() const;
// This function returns the CardTableRS object that allows us to scan
// generations in a fully generational heap.
CardTableRS* rem_set() { return _rem_set; }
--- 354,364 ----
void generation_iterate(GenClosure* cl, bool old_to_young);
// Return "true" if all generations have reached the
// maximal committed limit that they can reach, without a garbage
// collection.
! virtual bool is_maximal_no_gc() const;
// This function returns the CardTableRS object that allows us to scan
// generations in a fully generational heap.
CardTableRS* rem_set() { return _rem_set; }
< prev index next >