< prev index next >
src/share/vm/gc/shared/genCollectedHeap.hpp
Print this page
*** 93,129 ****
bool is_tlab,
bool first_only);
// Helper function for two callbacks below.
// Considers collection of the first max_level+1 generations.
! void do_collection(bool full,
! bool clear_all_soft_refs,
! size_t size,
! bool is_tlab,
! int max_level);
// Callback from VM_GenCollectForAllocation operation.
// This function does everything necessary/possible to satisfy an
// allocation request that failed in the youngest generation that should
// have handled it (including collection, expansion, etc.)
HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);
// Callback from VM_GenCollectFull operation.
// Perform a full collection of the first max_level+1 generations.
virtual void do_full_collection(bool clear_all_soft_refs);
! void do_full_collection(bool clear_all_soft_refs, int max_level);
// Does the "cause" of GC indicate that
// we absolutely __must__ clear soft refs?
bool must_clear_all_soft_refs();
public:
GenCollectedHeap(GenCollectorPolicy *policy);
FlexibleWorkGang* workers() const { return _workers; }
! GCStats* gc_stats(int level) const;
// Returns JNI_OK on success
virtual jint initialize();
// Reserve aligned space for the heap as needed by the contained generations.
--- 93,129 ----
bool is_tlab,
bool first_only);
// Helper function for two callbacks below.
! // Considers collection of generations up to and including max_generation.
! void do_collection(bool full,
! bool clear_all_soft_refs,
! size_t size,
! bool is_tlab,
! Generation::Type max_generation);
// Callback from VM_GenCollectForAllocation operation.
// This function does everything necessary/possible to satisfy an
// allocation request that failed in the youngest generation that should
// have handled it (including collection, expansion, etc.)
HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);
// Callback from VM_GenCollectFull operation.
! // Perform a full collection of generations up to and including max_generation.
virtual void do_full_collection(bool clear_all_soft_refs);
! void do_full_collection(bool clear_all_soft_refs, Generation::Type max_generation);
// Does the "cause" of GC indicate that
// we absolutely __must__ clear soft refs?
bool must_clear_all_soft_refs();
public:
GenCollectedHeap(GenCollectorPolicy *policy);
FlexibleWorkGang* workers() const { return _workers; }
! GCStats* gc_stats(Generation* generation) const;
// Returns JNI_OK on success
virtual jint initialize();
// Reserve aligned space for the heap as needed by the contained generations.
*** 158,169 ****
}
size_t capacity() const;
size_t used() const;
! // Save the "used_region" for generations level and lower.
! void save_used_regions(int level);
size_t max_capacity() const;
HeapWord* mem_allocate(size_t size,
bool* gc_overhead_limit_was_exceeded);
--- 158,169 ----
}
size_t capacity() const;
size_t used() const;
! // Save the "used_region" for both generations.
! void save_used_regions();
size_t max_capacity() const;
HeapWord* mem_allocate(size_t size,
bool* gc_overhead_limit_was_exceeded);
*** 180,192 ****
void collect(GCCause::Cause cause);
// The same as above but assume that the caller holds the Heap_lock.
void collect_locked(GCCause::Cause cause);
! // Perform a full collection of the first max_level+1 generations.
// Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
! void collect(GCCause::Cause cause, int max_level);
// Returns "TRUE" iff "p" points into the committed areas of the heap.
// The methods is_in(), is_in_closed_subset() and is_in_youngest() may
// be expensive to compute in general, so, to prevent
// their inadvertent use in product jvm's, we restrict their use to
--- 180,192 ----
void collect(GCCause::Cause cause);
// The same as above but assume that the caller holds the Heap_lock.
void collect_locked(GCCause::Cause cause);
! // Perform a full collection of generations up to and including max_generation.
// Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
! void collect(GCCause::Cause cause, Generation::Type max_generation);
// Returns "TRUE" iff "p" points into the committed areas of the heap.
// The methods is_in(), is_in_closed_subset() and is_in_youngest() may
// be expensive to compute in general, so, to prevent
// their inadvertent use in product jvm's, we restrict their use to
*** 312,325 ****
_young_gen->update_time_of_last_gc(now);
_old_gen->update_time_of_last_gc(now);
}
// Update the gc statistics for each generation.
! // "level" is the level of the latest collection.
! void update_gc_stats(int current_level, bool full) {
! _young_gen->update_gc_stats(current_level, full);
! _old_gen->update_gc_stats(current_level, full);
}
bool no_gc_in_progress() { return !is_gc_active(); }
// Override.
--- 312,323 ----
_young_gen->update_time_of_last_gc(now);
_old_gen->update_time_of_last_gc(now);
}
! // Update the gc statistics. Only the old generation's stats are updated;
! // current_generation identifies the generation that was just collected.
! void update_gc_stats(Generation* current_generation, bool full) {
! _old_gen->update_gc_stats(current_generation, full);
}
bool no_gc_in_progress() { return !is_gc_active(); }
// Override.
*** 363,374 ****
// Convenience function to be used in situations where the heap type can be
// asserted to be this type.
static GenCollectedHeap* heap();
// Invoke the "do_oop" method of one of the closures "not_older_gens"
! // or "older_gens" on root locations for the generation at
! // "level". (The "older_gens" closure is used for scanning references
// from older generations; "not_older_gens" is used everywhere else.)
// If "younger_gens_as_roots" is false, younger generations are
// not scanned as roots; in this case, the caller must be arranging to
// scan the younger generations itself. (For example, a generation might
// explicitly mark reachable objects in younger generations, to avoid
--- 361,372 ----
// Convenience function to be used in situations where the heap type can be
// asserted to be this type.
static GenCollectedHeap* heap();
// Invoke the "do_oop" method of one of the closures "not_older_gens"
! // or "older_gens" on root locations for the generation given by
! // "type". (The "older_gens" closure is used for scanning references
// from older generations; "not_older_gens" is used everywhere else.)
// If "younger_gens_as_roots" is false, younger generations are
// not scanned as roots; in this case, the caller must be arranging to
// scan the younger generations itself. (For example, a generation might
// explicitly mark reachable objects in younger generations, to avoid
*** 394,404 ****
public:
static const bool StrongAndWeakRoots = false;
static const bool StrongRootsOnly = true;
void gen_process_roots(StrongRootsScope* scope,
! int level,
bool younger_gens_as_roots,
ScanningOption so,
bool only_strong_roots,
OopsInGenClosure* not_older_gens,
OopsInGenClosure* older_gens,
--- 392,402 ----
public:
static const bool StrongAndWeakRoots = false;
static const bool StrongRootsOnly = true;
void gen_process_roots(StrongRootsScope* scope,
! Generation::Type type,
bool younger_gens_as_roots,
ScanningOption so,
bool only_strong_roots,
OopsInGenClosure* not_older_gens,
OopsInGenClosure* older_gens,
*** 418,450 ****
// allocated since the last call to save_marks in generations at or above
// "level". The "cur" closure is
// applied to references in the generation at "level", and the "older"
// closure to older generations.
#define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix) \
! void oop_since_save_marks_iterate(int level, \
OopClosureType* cur, \
OopClosureType* older);
ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL)
#undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL
! // Returns "true" iff no allocations have occurred in any generation at
! // "level" or above since the last
// call to "save_marks".
! bool no_allocs_since_save_marks(int level);
// Returns true if an incremental collection is likely to fail.
// We optionally consult the young gen, if asked to do so;
// otherwise we base our answer on whether the previous incremental
// collection attempt failed with no corrective action as of yet.
bool incremental_collection_will_fail(bool consult_young) {
! // Assumes a 2-generation system; the first disjunct remembers if an
! // incremental collection failed, even when we thought (second disjunct)
! // that it would not.
! assert(heap()->collector_policy()->is_generation_policy(),
! "the following definition may not be suitable for an n(>2)-generation system");
return incremental_collection_failed() ||
(consult_young && !_young_gen->collection_attempt_is_safe());
}
// If a generation bails out of an incremental collection,
--- 416,444 ----
// allocated since the last call to save_marks in generations at or above
! // "start_gen". The "cur" closure is
! // applied to references in the generation "start_gen", and the "older"
// closure to older generations.
#define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix) \
! void oop_since_save_marks_iterate(Generation::Type start_gen, \
OopClosureType* cur, \
OopClosureType* older);
ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL)
#undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL
! // Returns "true" iff no allocations have occurred since the last
! // call to "save_marks"; include_young selects whether the young
! // generation is included in the check.
! bool no_allocs_since_save_marks(bool include_young);
// Returns true if an incremental collection is likely to fail.
// We optionally consult the young gen, if asked to do so;
// otherwise we base our answer on whether the previous incremental
// collection attempt failed with no corrective action as of yet.
bool incremental_collection_will_fail(bool consult_young) {
! // The first disjunct remembers if an incremental collection failed, even
! // when we thought (second disjunct) that it would not.
return incremental_collection_failed() ||
(consult_young && !_young_gen->collection_attempt_is_safe());
}
// If a generation bails out of an incremental collection,
*** 480,493 ****
// For use by mark-sweep. As implemented, mark-sweep-compact is global
// in an essential way: compaction is performed across generations, by
// iterating over spaces.
void prepare_for_compaction();
! // Perform a full collection of the first max_level+1 generations.
// This is the low level interface used by the public versions of
// collect() and collect_locked(). Caller holds the Heap_lock on entry.
! void collect_locked(GCCause::Cause cause, int max_level);
// Returns success or failure.
bool create_cms_collector();
// In support of ExplicitGCInvokesConcurrent functionality
--- 474,487 ----
// For use by mark-sweep. As implemented, mark-sweep-compact is global
// in an essential way: compaction is performed across generations, by
// iterating over spaces.
void prepare_for_compaction();
! // Perform a full collection of the generations up to and including max_generation.
// This is the low level interface used by the public versions of
// collect() and collect_locked(). Caller holds the Heap_lock on entry.
! void collect_locked(GCCause::Cause cause, Generation::Type max_generation);
// Returns success or failure.
bool create_cms_collector();
// In support of ExplicitGCInvokesConcurrent functionality
< prev index next >