src/share/vm/memory/genCollectedHeap.hpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File hotspot Cdiff src/share/vm/memory/genCollectedHeap.hpp

src/share/vm/memory/genCollectedHeap.hpp

Print this page
rev 7211 : [mq]: remove_ngen
rev 7212 : [mq]: remove_get_gen
rev 7213 : imported patch move_genspecs
rev 7214 : imported patch remove_n_gen
rev 7215 : imported patch remove_levels
rev 7216 : imported patch cleanup

*** 31,41 **** #include "memory/sharedHeap.hpp" class SubTasksDone; // A "GenCollectedHeap" is a SharedHeap that uses generational ! // collection. It is represented with a sequence of Generation's. class GenCollectedHeap : public SharedHeap { friend class GenCollectorPolicy; friend class Generation; friend class DefNewGeneration; friend class TenuredGeneration; --- 31,41 ---- #include "memory/sharedHeap.hpp" class SubTasksDone; // A "GenCollectedHeap" is a SharedHeap that uses generational ! // collection. It has two generations, young and old. class GenCollectedHeap : public SharedHeap { friend class GenCollectorPolicy; friend class Generation; friend class DefNewGeneration; friend class TenuredGeneration;
*** 49,72 **** friend class VM_HeapDumper; friend class HeapInspection; friend class GCCauseSetter; friend class VMStructs; public: - enum SomeConstants { - max_gens = 10 - }; - friend class VM_PopulateDumpSharedSpace; protected: // Fields: static GenCollectedHeap* _gch; private: ! int _n_gens; ! Generation* _gens[max_gens]; ! GenerationSpec** _gen_specs; // The generational collector policy. GenCollectorPolicy* _gen_policy; // Indicates that the most recent previous incremental collection failed. --- 49,67 ---- friend class VM_HeapDumper; friend class HeapInspection; friend class GCCauseSetter; friend class VMStructs; public: friend class VM_PopulateDumpSharedSpace; protected: // Fields: static GenCollectedHeap* _gch; private: ! Generation* _young_gen; ! Generation* _old_gen; // The generational collector policy. GenCollectorPolicy* _gen_policy; // Indicates that the most recent previous incremental collection failed.
*** 80,89 **** --- 75,87 ---- // Data structure for claiming the (potentially) parallel tasks in // (gen-specific) roots processing. SubTasksDone* _gen_process_roots_tasks; SubTasksDone* gen_process_roots_tasks() { return _gen_process_roots_tasks; } + void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab, + bool run_verification, bool clear_soft_refs); + // In block contents verification, the number of header words to skip NOT_PRODUCT(static size_t _skip_header_HeapWords;) protected: // Helper functions for allocation
*** 95,128 **** // Considers collection of the first max_level+1 generations. void do_collection(bool full, bool clear_all_soft_refs, size_t size, bool is_tlab, ! int max_level); // Callback from VM_GenCollectForAllocation operation. // This function does everything necessary/possible to satisfy an // allocation request that failed in the youngest generation that should // have handled it (including collection, expansion, etc.) HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab); // Callback from VM_GenCollectFull operation. // Perform a full collection of the first max_level+1 generations. virtual void do_full_collection(bool clear_all_soft_refs); ! void do_full_collection(bool clear_all_soft_refs, int max_level); // Does the "cause" of GC indicate that // we absolutely __must__ clear soft refs? bool must_clear_all_soft_refs(); public: GenCollectedHeap(GenCollectorPolicy *policy); ! GCStats* gc_stats(int level) const; // Returns JNI_OK on success virtual jint initialize(); char* allocate(size_t alignment, size_t* _total_reserved, int* _n_covered_regions, ReservedSpace* heap_rs); // Does operations required after initialization has been done. --- 93,127 ---- // Considers collection of generations up to and including max_generation. void do_collection(bool full, bool clear_all_soft_refs, size_t size, bool is_tlab, ! Generation::Type max_generation); // Callback from VM_GenCollectForAllocation operation. // This function does everything necessary/possible to satisfy an // allocation request that failed in the youngest generation that should // have handled it (including collection, expansion, etc.) HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab); // Callback from VM_GenCollectFull operation. // Perform a full collection of the generations up to and including max_gen. virtual void do_full_collection(bool clear_all_soft_refs); ! 
void do_full_collection(bool clear_all_soft_refs, Generation::Type max_gen); // Does the "cause" of GC indicate that // we absolutely __must__ clear soft refs? bool must_clear_all_soft_refs(); public: GenCollectedHeap(GenCollectorPolicy *policy); ! GCStats* gc_stats(Generation* gen) const; // Returns JNI_OK on success virtual jint initialize(); + char* allocate(size_t alignment, size_t* _total_reserved, int* _n_covered_regions, ReservedSpace* heap_rs); // Does operations required after initialization has been done.
*** 133,144 **** --- 132,147 ---- virtual CollectedHeap::Name kind() const { return CollectedHeap::GenCollectedHeap; } + Generation* young_gen() const { return _young_gen; } + Generation* old_gen() const { return _old_gen; } + // The generational collector policy. GenCollectorPolicy* gen_policy() const { return _gen_policy; } + virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) gen_policy(); } // Adaptive size policy virtual AdaptiveSizePolicy* size_policy() { return gen_policy()->size_policy();
*** 150,166 **** } size_t capacity() const; size_t used() const; ! // Save the "used_region" for generations level and lower. ! void save_used_regions(int level); size_t max_capacity() const; ! HeapWord* mem_allocate(size_t size, ! bool* gc_overhead_limit_was_exceeded); // We may support a shared contiguous allocation area, if the youngest // generation does. bool supports_inline_contig_alloc() const; HeapWord** top_addr() const; --- 153,168 ---- } size_t capacity() const; size_t used() const; ! // Save the "used_region" for both generations. ! void save_used_regions(); size_t max_capacity() const; ! HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded); // We may support a shared contiguous allocation area, if the youngest // generation does. bool supports_inline_contig_alloc() const; HeapWord** top_addr() const;
*** 175,187 **** void collect(GCCause::Cause cause); // The same as above but assume that the caller holds the Heap_lock. void collect_locked(GCCause::Cause cause); ! // Perform a full collection of the first max_level+1 generations. // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry. ! void collect(GCCause::Cause cause, int max_level); // Returns "TRUE" iff "p" points into the committed areas of the heap. // The methods is_in(), is_in_closed_subset() and is_in_youngest() may // be expensive to compute in general, so, to prevent // their inadvertent use in product jvm's, we restrict their use to --- 177,189 ---- void collect(GCCause::Cause cause); // The same as above but assume that the caller holds the Heap_lock. void collect_locked(GCCause::Cause cause); ! // Perform a full collection of generations up to and including max_gen. // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry. ! void collect(GCCause::Cause cause, Generation::Type max_gen); // Returns "TRUE" iff "p" points into the committed areas of the heap. // The methods is_in(), is_in_closed_subset() and is_in_youngest() may // be expensive to compute in general, so, to prevent // their inadvertent use in product jvm's, we restrict their use to
*** 304,327 **** // Update above counter, as appropriate, at the end of a stop-world GC cycle unsigned int update_full_collections_completed(); // Update above counter, as appropriate, at the end of a concurrent GC cycle unsigned int update_full_collections_completed(unsigned int count); ! // Update "time of last gc" for all constituent generations ! // to "now". void update_time_of_last_gc(jlong now) { ! for (int i = 0; i < _n_gens; i++) { ! _gens[i]->update_time_of_last_gc(now); ! } } // Update the gc statistics for each generation. ! // "level" is the level of the latest collection. ! void update_gc_stats(int current_level, bool full) { ! for (int i = 0; i < _n_gens; i++) { ! _gens[i]->update_gc_stats(current_level, full); ! } } // Override. bool no_gc_in_progress() { return !is_gc_active(); } --- 306,324 ---- // Update above counter, as appropriate, at the end of a stop-world GC cycle unsigned int update_full_collections_completed(); // Update above counter, as appropriate, at the end of a concurrent GC cycle unsigned int update_full_collections_completed(unsigned int count); ! // Update "time of last gc" for all generations to "now". void update_time_of_last_gc(jlong now) { ! _young_gen->update_time_of_last_gc(now); ! _old_gen->update_time_of_last_gc(now); } // Update the gc statistics for each generation. ! void update_gc_stats(Generation* current_generation, bool full) { ! _old_gen->update_gc_stats(current_generation, full); } // Override. bool no_gc_in_progress() { return !is_gc_active(); }
*** 359,412 **** // Return "true" if all generations have reached the // maximal committed limit that they can reach, without a garbage // collection. virtual bool is_maximal_no_gc() const; - // Return the generation before "gen". - Generation* prev_gen(Generation* gen) const { - int l = gen->level(); - guarantee(l > 0, "Out of bounds"); - return _gens[l-1]; - } - - // Return the generation after "gen". - Generation* next_gen(Generation* gen) const { - int l = gen->level() + 1; - guarantee(l < _n_gens, "Out of bounds"); - return _gens[l]; - } - - Generation* get_gen(int i) const { - guarantee(i >= 0 && i < _n_gens, "Out of bounds"); - return _gens[i]; - } - - int n_gens() const { - assert(_n_gens == gen_policy()->number_of_generations(), "Sanity"); - return _n_gens; - } - // Convenience function to be used in situations where the heap type can be // asserted to be this type. static GenCollectedHeap* heap(); void set_par_threads(uint t); // Invoke the "do_oop" method of one of the closures "not_older_gens" ! // or "older_gens" on root locations for the generation at ! // "level". (The "older_gens" closure is used for scanning references // from older generations; "not_older_gens" is used everywhere else.) // If "younger_gens_as_roots" is false, younger generations are // not scanned as roots; in this case, the caller must be arranging to // scan the younger generations itself. (For example, a generation might // explicitly mark reachable objects in younger generations, to avoid // excess storage retention.) // The "so" argument determines which of the roots // the closure is applied to: // "SO_None" does none; private: ! void gen_process_roots(int level, bool younger_gens_as_roots, bool activate_scope, SharedHeap::ScanningOption so, OopsInGenClosure* not_older_gens, OopsInGenClosure* weak_roots, --- 356,385 ---- // Return "true" if all generations have reached the // maximal committed limit that they can reach, without a garbage // collection. 
virtual bool is_maximal_no_gc() const; // Convenience function to be used in situations where the heap type can be // asserted to be this type. static GenCollectedHeap* heap(); void set_par_threads(uint t); // Invoke the "do_oop" method of one of the closures "not_older_gens" ! // or "older_gens" on root locations for the generations depending on ! // the type. (The "older_gens" closure is used for scanning references // from older generations; "not_older_gens" is used everywhere else.) // If "younger_gens_as_roots" is false, younger generations are // not scanned as roots; in this case, the caller must be arranging to // scan the younger generations itself. (For example, a generation might // explicitly mark reachable objects in younger generations, to avoid // excess storage retention.) // The "so" argument determines which of the roots // the closure is applied to: // "SO_None" does none; private: ! void gen_process_roots(Generation::Type type, bool younger_gens_as_roots, bool activate_scope, SharedHeap::ScanningOption so, OopsInGenClosure* not_older_gens, OopsInGenClosure* weak_roots,
*** 417,427 **** public: static const bool StrongAndWeakRoots = false; static const bool StrongRootsOnly = true; ! void gen_process_roots(int level, bool younger_gens_as_roots, bool activate_scope, SharedHeap::ScanningOption so, bool only_strong_roots, OopsInGenClosure* not_older_gens, --- 390,400 ---- public: static const bool StrongAndWeakRoots = false; static const bool StrongRootsOnly = true; ! void gen_process_roots(Generation::Type type, bool younger_gens_as_roots, bool activate_scope, SharedHeap::ScanningOption so, bool only_strong_roots, OopsInGenClosure* not_older_gens,
*** 442,476 **** // allocated since the last call to save_marks in generations at or above // "level". The "cur" closure is // applied to references in the generation at "level", and the "older" // closure to older generations. #define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix) \ ! void oop_since_save_marks_iterate(int level, \ OopClosureType* cur, \ OopClosureType* older); ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL) #undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL ! // Returns "true" iff no allocations have occurred in any generation at ! // "level" or above since the last // call to "save_marks". ! bool no_allocs_since_save_marks(int level); // Returns true if an incremental collection is likely to fail. // We optionally consult the young gen, if asked to do so; // otherwise we base our answer on whether the previous incremental // collection attempt failed with no corrective action as of yet. bool incremental_collection_will_fail(bool consult_young) { ! // Assumes a 2-generation system; the first disjunct remembers if an ! // incremental collection failed, even when we thought (second disjunct) ! // that it would not. ! assert(heap()->collector_policy()->is_generation_policy(), ! "the following definition may not be suitable for an n(>2)-generation system"); return incremental_collection_failed() || ! (consult_young && !get_gen(0)->collection_attempt_is_safe()); } // If a generation bails out of an incremental collection, // it sets this flag. bool incremental_collection_failed() const { --- 415,445 ---- // allocated since the last call to save_marks in generations at or above // start_gen. The "cur" closure is // applied to references in the generation identified by start_gen, and the // "older" closure to older generations. #define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix) \ ! 
void oop_since_save_marks_iterate(Generation::Type start_gen, \ OopClosureType* cur, \ OopClosureType* older); ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL) #undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL ! // Returns "true" iff no allocations have occurred since the last // call to "save_marks". ! bool no_allocs_since_save_marks(bool include_young); // Returns true if an incremental collection is likely to fail. // We optionally consult the young gen, if asked to do so; // otherwise we base our answer on whether the previous incremental // collection attempt failed with no corrective action as of yet. bool incremental_collection_will_fail(bool consult_young) { ! // The first disjunct remembers if an incremental collection failed, even ! // when we thought (second disjunct) that it would not. return incremental_collection_failed() || ! (consult_young && !_young_gen->collection_attempt_is_safe()); } // If a generation bails out of an incremental collection, // it sets this flag. bool incremental_collection_failed() const {
*** 504,517 **** // For use by mark-sweep. As implemented, mark-sweep-compact is global // in an essential way: compaction is performed across generations, by // iterating over spaces. void prepare_for_compaction(); ! // Perform a full collection of the first max_level+1 generations. // This is the low level interface used by the public versions of // collect() and collect_locked(). Caller holds the Heap_lock on entry. ! void collect_locked(GCCause::Cause cause, int max_level); // Returns success or failure. bool create_cms_collector(); // In support of ExplicitGCInvokesConcurrent functionality --- 473,486 ---- // For use by mark-sweep. As implemented, mark-sweep-compact is global // in an essential way: compaction is performed across generations, by // iterating over spaces. void prepare_for_compaction(); ! // Perform a full collection of the generations up to and including max_gen. // This is the low level interface used by the public versions of // collect() and collect_locked(). Caller holds the Heap_lock on entry. ! void collect_locked(GCCause::Cause cause, Generation::Type max_gen); // Returns success or failure. bool create_cms_collector(); // In support of ExplicitGCInvokesConcurrent functionality
src/share/vm/memory/genCollectedHeap.hpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File