< prev index next >
src/share/vm/gc/shared/genCollectedHeap.hpp
Print this page
rev 13243 : [mq]: 8179387.patch
*** 76,100 ****
bool _incremental_collection_failed;
// In support of ExplicitGCInvokesConcurrent functionality
unsigned int _full_collections_completed;
- // Data structure for claiming the (potentially) parallel tasks in
- // (gen-specific) roots processing.
- SubTasksDone* _process_strong_tasks;
-
// Collects the given generation.
void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
bool run_verification, bool clear_soft_refs,
bool restore_marks_for_biased_locking);
! // In block contents verification, the number of header words to skip
! NOT_PRODUCT(static size_t _skip_header_HeapWords;)
! WorkGang* _workers;
- protected:
// Helper functions for allocation
HeapWord* attempt_allocation(size_t size,
bool is_tlab,
bool first_only);
--- 76,113 ----
bool _incremental_collection_failed;
// In support of ExplicitGCInvokesConcurrent functionality
unsigned int _full_collections_completed;
// Collects the given generation.
void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
bool run_verification, bool clear_soft_refs,
bool restore_marks_for_biased_locking);
! protected:
! // The set of potentially parallel tasks in root scanning.
! enum GCH_strong_roots_tasks {
! GCH_PS_Universe_oops_do,
! GCH_PS_JNIHandles_oops_do,
! GCH_PS_ObjectSynchronizer_oops_do,
! GCH_PS_FlatProfiler_oops_do,
! GCH_PS_Management_oops_do,
! GCH_PS_SystemDictionary_oops_do,
! GCH_PS_ClassLoaderDataGraph_oops_do,
! GCH_PS_jvmti_oops_do,
! GCH_PS_CodeCache_oops_do,
! GCH_PS_aot_oops_do,
! GCH_PS_younger_gens,
! // Leave this one last.
! GCH_PS_NumElements
! };
!
! // Data structure for claiming the (potentially) parallel tasks in
! // (gen-specific) roots processing.
! SubTasksDone* _process_strong_tasks;
// Helper functions for allocation
HeapWord* attempt_allocation(size_t size,
bool is_tlab,
bool first_only);
*** 122,156 ****
bool must_clear_all_soft_refs();
public:
GenCollectedHeap(GenCollectorPolicy *policy);
- WorkGang* workers() const { return _workers; }
-
// Returns JNI_OK on success
virtual jint initialize();
// Reserve aligned space for the heap as needed by the contained generations.
char* allocate(size_t alignment, ReservedSpace* heap_rs);
// Does operations required after initialization has been done.
void post_initialize();
// Initialize ("weak") refs processing support
virtual void ref_processing_init();
virtual Name kind() const {
return CollectedHeap::GenCollectedHeap;
}
virtual const char* name() const {
- if (UseConcMarkSweepGC) {
- return "Concurrent Mark Sweep";
- } else {
return "Serial";
}
- }
Generation* young_gen() const { return _young_gen; }
Generation* old_gen() const { return _old_gen; }
bool is_young_gen(const Generation* gen) const { return gen == _young_gen; }
--- 135,165 ----
bool must_clear_all_soft_refs();
public:
GenCollectedHeap(GenCollectorPolicy *policy);
// Returns JNI_OK on success
virtual jint initialize();
// Reserve aligned space for the heap as needed by the contained generations.
char* allocate(size_t alignment, ReservedSpace* heap_rs);
// Does operations required after initialization has been done.
void post_initialize();
+ virtual void check_gen_kinds();
+
// Initialize ("weak") refs processing support
virtual void ref_processing_init();
virtual Name kind() const {
return CollectedHeap::GenCollectedHeap;
}
virtual const char* name() const {
return "Serial";
}
Generation* young_gen() const { return _young_gen; }
Generation* old_gen() const { return _old_gen; }
bool is_young_gen(const Generation* gen) const { return gen == _young_gen; }
*** 188,198 ****
HeapWord** end_addr() const;
// Perform a full collection of the heap; intended for use in implementing
// "System.gc". This implies as full a collection as the CollectedHeap
// supports. Caller does not hold the Heap_lock on entry.
! void collect(GCCause::Cause cause);
// The same as above but assume that the caller holds the Heap_lock.
void collect_locked(GCCause::Cause cause);
// Perform a full collection of generations up to and including max_generation.
--- 197,207 ----
HeapWord** end_addr() const;
// Perform a full collection of the heap; intended for use in implementing
// "System.gc". This implies as full a collection as the CollectedHeap
// supports. Caller does not hold the Heap_lock on entry.
! virtual void collect(GCCause::Cause cause);
// The same as above but assume that the caller holds the Heap_lock.
void collect_locked(GCCause::Cause cause);
// Perform a full collection of generations up to and including max_generation.
*** 205,221 ****
// their inadvertent use in product jvm's, we restrict their use to
// assertion checking or verification only.
bool is_in(const void* p) const;
// override
! bool is_in_closed_subset(const void* p) const {
! if (UseConcMarkSweepGC) {
! return is_in_reserved(p);
! } else {
return is_in(p);
}
- }
// Returns true if the reference is to an object in the reserved space
// for the young generation.
// Assumes that the young gen address range is less than that of the old gen.
bool is_in_young(oop p);
--- 214,226 ----
// their inadvertent use in product jvm's, we restrict their use to
// assertion checking or verification only.
bool is_in(const void* p) const;
// override
! virtual bool is_in_closed_subset(const void* p) const {
return is_in(p);
}
// Returns true if the reference is to an object in the reserved space
// for the young generation.
// Assumes that the young gen address range is less than that of the old gen.
bool is_in_young(oop p);
*** 276,286 ****
virtual bool can_elide_tlab_store_barriers() const {
return true;
}
virtual bool card_mark_must_follow_store() const {
! return UseConcMarkSweepGC;
}
// We don't need barriers for stores to objects in the
// young gen and, a fortiori, for initializing stores to
// objects therein. This applies to DefNew+Tenured and ParNew+CMS
--- 281,291 ----
virtual bool can_elide_tlab_store_barriers() const {
return true;
}
virtual bool card_mark_must_follow_store() const {
! return false;
}
// We don't need barriers for stores to objects in the
// young gen and, a fortiori, for initializing stores to
// objects therein. This applies to DefNew+Tenured and ParNew+CMS
*** 342,352 ****
// Override.
virtual void print_on(outputStream* st) const;
virtual void print_gc_threads_on(outputStream* st) const;
virtual void gc_threads_do(ThreadClosure* tc) const;
virtual void print_tracing_info() const;
- virtual void print_on_error(outputStream* st) const;
void print_heap_change(size_t young_prev_used, size_t old_prev_used) const;
// The functions below are helper functions that a subclass of
// "CollectedHeap" can use in the implementation of its virtual
--- 347,356 ----
*** 381,391 ****
SO_None = 0x0,
SO_AllCodeCache = 0x8,
SO_ScavengeCodeCache = 0x10
};
! private:
void process_roots(StrongRootsScope* scope,
ScanningOption so,
OopClosure* strong_roots,
OopClosure* weak_roots,
CLDClosure* strong_cld_closure,
--- 385,395 ----
SO_None = 0x0,
SO_AllCodeCache = 0x8,
SO_ScavengeCodeCache = 0x10
};
! protected:
void process_roots(StrongRootsScope* scope,
ScanningOption so,
OopClosure* strong_roots,
OopClosure* weak_roots,
CLDClosure* strong_cld_closure,
*** 399,420 ****
void young_process_roots(StrongRootsScope* scope,
OopsInGenClosure* root_closure,
OopsInGenClosure* old_gen_closure,
CLDClosure* cld_closure);
- // If "young_gen_as_roots" is false, younger generations are
- // not scanned as roots; in this case, the caller must be arranging to
- // scan the younger generations itself. (For example, a generation might
- // explicitly mark reachable objects in younger generations, to avoid
- // excess storage retention.)
- void cms_process_roots(StrongRootsScope* scope,
- bool young_gen_as_roots,
- ScanningOption so,
- bool only_strong_roots,
- OopsInGenClosure* root_closure,
- CLDClosure* cld_closure);
-
void full_process_roots(StrongRootsScope* scope,
bool is_adjust_phase,
ScanningOption so,
bool only_strong_roots,
OopsInGenClosure* root_closure,
--- 403,412 ----
*** 477,492 ****
// generation; return the new location of obj if successful. Otherwise, return NULL.
oop handle_failed_promotion(Generation* old_gen,
oop obj,
size_t obj_size);
! private:
// Accessor for memory state verification support
NOT_PRODUCT(
! static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
)
// Override
void check_for_non_bad_heap_word_value(HeapWord* addr,
size_t size) PRODUCT_RETURN;
// For use by mark-sweep. As implemented, mark-sweep-compact is global
--- 469,486 ----
// generation; return the new location of obj if successful. Otherwise, return NULL.
oop handle_failed_promotion(Generation* old_gen,
oop obj,
size_t obj_size);
!
! protected:
// Accessor for memory state verification support
NOT_PRODUCT(
! virtual size_t skip_header_HeapWords() { return 0; }
)
+ private:
// Override
void check_for_non_bad_heap_word_value(HeapWord* addr,
size_t size) PRODUCT_RETURN;
// For use by mark-sweep. As implemented, mark-sweep-compact is global
*** 497,520 ****
// Perform a full collection of the generations up to and including max_generation.
// This is the low level interface used by the public versions of
// collect() and collect_locked(). Caller holds the Heap_lock on entry.
void collect_locked(GCCause::Cause cause, GenerationType max_generation);
- // Returns success or failure.
- bool create_cms_collector();
-
- // In support of ExplicitGCInvokesConcurrent functionality
- bool should_do_concurrent_full_gc(GCCause::Cause cause);
- void collect_mostly_concurrent(GCCause::Cause cause);
-
// Save the tops of the spaces in all generations
void record_gen_tops_before_GC() PRODUCT_RETURN;
protected:
! void gc_prologue(bool full);
! void gc_epilogue(bool full);
- public:
- void stop();
};
#endif // SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP
--- 491,505 ----
// Perform a full collection of the generations up to and including max_generation.
// This is the low level interface used by the public versions of
// collect() and collect_locked(). Caller holds the Heap_lock on entry.
void collect_locked(GCCause::Cause cause, GenerationType max_generation);
// Save the tops of the spaces in all generations
void record_gen_tops_before_GC() PRODUCT_RETURN;
protected:
! virtual void gc_prologue(bool full);
! virtual void gc_epilogue(bool full);
};
#endif // SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP
< prev index next >