src/share/vm/memory/genCollectedHeap.hpp
rev 7211 : [mq]: remove_ngen
rev 7212 : [mq]: remove_get_gen
rev 7213 : imported patch move_genspecs
rev 7214 : imported patch remove_n_gen
rev 7215 : imported patch remove_levels

*** 93,123 ****
    // Considers collection of the first max_level+1 generations.
    void do_collection(bool   full,
                       bool   clear_all_soft_refs,
                       size_t size,
                       bool   is_tlab,
!                      int    max_level);
  
    // Callback from VM_GenCollectForAllocation operation.
    // This function does everything necessary/possible to satisfy an
    // allocation request that failed in the youngest generation that should
    // have handled it (including collection, expansion, etc.)
    HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);
  
    // Callback from VM_GenCollectFull operation.
    // Perform a full collection of the first max_level+1 generations.
    virtual void do_full_collection(bool clear_all_soft_refs);
!   void do_full_collection(bool clear_all_soft_refs, int max_level);
  
    // Does the "cause" of GC indicate that
    // we absolutely __must__ clear soft refs?
    bool must_clear_all_soft_refs();
  
   public:
    GenCollectedHeap(GenCollectorPolicy *policy);
  
!   GCStats* gc_stats(int level) const;
  
    // Returns JNI_OK on success
    virtual jint initialize();
    char* allocate(size_t alignment,
--- 93,123 ----
    // Considers collection of the first max_level+1 generations.
    void do_collection(bool             full,
                       bool             clear_all_soft_refs,
                       size_t           size,
                       bool             is_tlab,
!                      Generation::Type max_generation);
  
    // Callback from VM_GenCollectForAllocation operation.
    // This function does everything necessary/possible to satisfy an
    // allocation request that failed in the youngest generation that should
    // have handled it (including collection, expansion, etc.)
    HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);
  
    // Callback from VM_GenCollectFull operation.
    // Perform a full collection of the first max_level+1 generations.
    virtual void do_full_collection(bool clear_all_soft_refs);
!   void do_full_collection(bool clear_all_soft_refs, Generation::Type max_gen);
  
    // Does the "cause" of GC indicate that
    // we absolutely __must__ clear soft refs?
    bool must_clear_all_soft_refs();
  
   public:
    GenCollectedHeap(GenCollectorPolicy *policy);
  
!   GCStats* gc_stats(Generation* gen) const;
  
    // Returns JNI_OK on success
    virtual jint initialize();
    char* allocate(size_t alignment,
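
Editor's note: the hunk above replaces raw integer generation levels with a typed
parameter. A minimal standalone sketch of the new call shape, assuming
Generation::Type is a two-valued enum (the enumerator names Young and Old are
illustrative, not taken from this diff):

    #include <cstdio>

    struct Generation {
      enum Type { Young, Old };  // assumed enumerator names
    };

    class GenCollectedHeapModel {
     public:
      // Old shape (before this patch): the caller passed a raw level such as 0 or 1.
      //   void do_full_collection(bool clear_all_soft_refs, int max_level);
      // New shape: the oldest generation to collect is named explicitly.
      void do_full_collection(bool clear_all_soft_refs, Generation::Type max_gen) {
        std::printf("full collection up to the %s generation, clear_soft_refs=%d\n",
                    max_gen == Generation::Old ? "old" : "young",
                    (int)clear_all_soft_refs);
      }
    };

    int main() {
      GenCollectedHeapModel heap;
      heap.do_full_collection(true, Generation::Old);  // was: do_full_collection(true, 1)
      return 0;
    }

A named type makes nonsensical call sites (e.g. a level of 2 in a two-generation
heap) unrepresentable, which is presumably the point of the remove_levels patch.
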
*** 153,164 ****
    }
  
    size_t capacity() const;
    size_t used() const;
  
!   // Save the "used_region" for generations level and lower.
!   void save_used_regions(int level);
  
    size_t max_capacity() const;
  
    HeapWord* mem_allocate(size_t size,
                           bool* gc_overhead_limit_was_exceeded);
--- 153,164 ----
    }
  
    size_t capacity() const;
    size_t used() const;
  
!   // Save the "used_region" for both generations.
!   void save_used_regions();
  
    size_t max_capacity() const;
  
    HeapWord* mem_allocate(size_t size,
                           bool* gc_overhead_limit_was_exceeded);
*** 178,190 ****
    void collect(GCCause::Cause cause);
  
    // The same as above but assume that the caller holds the Heap_lock.
    void collect_locked(GCCause::Cause cause);
  
!   // Perform a full collection of the first max_level+1 generations.
    // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
!   void collect(GCCause::Cause cause, int max_level);
  
    // Returns "TRUE" iff "p" points into the committed areas of the heap.
    // The methods is_in(), is_in_closed_subset() and is_in_youngest() may
    // be expensive to compute in general, so, to prevent
    // their inadvertent use in product jvm's, we restrict their use to
--- 178,190 ----
    void collect(GCCause::Cause cause);
  
    // The same as above but assume that the caller holds the Heap_lock.
    void collect_locked(GCCause::Cause cause);
  
!   // Perform a full collection of generations up to and including max_gen.
    // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
!   void collect(GCCause::Cause cause, Generation::Type max_gen);
  
    // Returns "TRUE" iff "p" points into the committed areas of the heap.
    // The methods is_in(), is_in_closed_subset() and is_in_youngest() may
    // be expensive to compute in general, so, to prevent
    // their inadvertent use in product jvm's, we restrict their use to
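
Editor's note: for the testing-oriented collect() above, a call site migrates
from a numeric level to a named generation. A hedged sketch with stub types;
the delegation from collect(cause) to the two-argument form is an assumption
for illustration and is not shown in this diff:

    #include <cstdio>

    struct Generation { enum Type { Young, Old }; };          // assumed enumerators
    struct GCCause { enum Cause { _java_lang_system_gc }; };  // reduced to a stub

    class HeapModel {
     public:
      // Full-heap entry point; assumed here to delegate to the two-argument form.
      void collect(GCCause::Cause cause) { collect(cause, Generation::Old); }

      // Testing-oriented entry point: the oldest generation is named, not numbered.
      void collect(GCCause::Cause cause, Generation::Type max_gen) {
        std::printf("cause=%d, collect up to the %s gen\n", (int)cause,
                    max_gen == Generation::Old ? "old" : "young");
      }
    };

    int main() {
      HeapModel h;
      h.collect(GCCause::_java_lang_system_gc);                     // whole heap
      h.collect(GCCause::_java_lang_system_gc, Generation::Young);  // was: collect(cause, 0)
      return 0;
    }
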
*** 314,327 ****
      _young_gen->update_time_of_last_gc(now);
      _old_gen->update_time_of_last_gc(now);
    }
  
    // Update the gc statistics for each generation.
!   // "level" is the level of the latest collection.
!   void update_gc_stats(int current_level, bool full) {
!     _young_gen->update_gc_stats(current_level, full);
!     _old_gen->update_gc_stats(current_level, full);
    }
  
    // Override.
    bool no_gc_in_progress() { return !is_gc_active(); }
--- 314,325 ----
      _young_gen->update_time_of_last_gc(now);
      _old_gen->update_time_of_last_gc(now);
    }
  
    // Update the gc statistics for each generation.
!   void update_gc_stats(Generation* current_generation, bool full) {
!     _old_gen->update_gc_stats(current_generation, full);
    }
  
    // Override.
    bool no_gc_in_progress() { return !is_gc_active(); }
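
Editor's note: update_gc_stats() is now keyed by a pointer to the generation
that was just collected, and only the old generation's statistics are
refreshed. A sketch of the assumed shape with stand-in types; the rationale
(that only the old generation keeps adaptive collection statistics, as with
the tenured/CMS stats) is an inference from the new body, not stated here:

    #include <cstdio>

    struct Generation {
      const char* name;
      void update_gc_stats(Generation* current_generation, bool full) {
        std::printf("%s gen: stats updated (collected=%s, full=%d)\n",
                    name, current_generation->name, (int)full);
      }
    };

    class HeapModel {
     public:
      HeapModel(Generation* young, Generation* old_gen)
          : _young_gen(young), _old_gen(old_gen) {}

      // New shape: only the old generation's statistics are refreshed,
      // keyed by the generation that was just collected.
      void update_gc_stats(Generation* current_generation, bool full) {
        _old_gen->update_gc_stats(current_generation, full);
      }

     private:
      Generation* _young_gen;  // kept to mirror the real class; unused here
      Generation* _old_gen;
    };

    int main() {
      Generation young = {"young"};
      Generation old_gen = {"old"};
      HeapModel heap(&young, &old_gen);
      heap.update_gc_stats(&young, false);  // e.g. after a minor collection
      return 0;
    }
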
*** 366,388 ****
    static GenCollectedHeap* heap();
  
    void set_par_threads(uint t);
  
    // Invoke the "do_oop" method of one of the closures "not_older_gens"
!   // or "older_gens" on root locations for the generation at
!   // "level".  (The "older_gens" closure is used for scanning references
    // from older generations; "not_older_gens" is used everywhere else.)
    // If "younger_gens_as_roots" is false, younger generations are
    // not scanned as roots; in this case, the caller must be arranging to
    // scan the younger generations itself.  (For example, a generation might
    // explicitly mark reachable objects in younger generations, to avoid
    // excess storage retention.)
    // The "so" argument determines which of the roots
    // the closure is applied to:
    // "SO_None" does none;
   private:
!   void gen_process_roots(int level,
                           bool younger_gens_as_roots,
                           bool activate_scope,
                           SharedHeap::ScanningOption so,
                           OopsInGenClosure* not_older_gens,
                           OopsInGenClosure* weak_roots,
--- 364,386 ----
    static GenCollectedHeap* heap();
  
    void set_par_threads(uint t);
  
    // Invoke the "do_oop" method of one of the closures "not_older_gens"
!   // or "older_gens" on root locations for the generations depending on
!   // the type.  (The "older_gens" closure is used for scanning references
    // from older generations; "not_older_gens" is used everywhere else.)
    // If "younger_gens_as_roots" is false, younger generations are
    // not scanned as roots; in this case, the caller must be arranging to
    // scan the younger generations itself.  (For example, a generation might
    // explicitly mark reachable objects in younger generations, to avoid
    // excess storage retention.)
    // The "so" argument determines which of the roots
    // the closure is applied to:
    // "SO_None" does none;
   private:
!   void gen_process_roots(Generation::Type type,
                           bool younger_gens_as_roots,
                           bool activate_scope,
                           SharedHeap::ScanningOption so,
                           OopsInGenClosure* not_older_gens,
                           OopsInGenClosure* weak_roots,
*** 393,403 ****
   public:
    static const bool StrongAndWeakRoots = false;
    static const bool StrongRootsOnly    = true;
  
!   void gen_process_roots(int level,
                           bool younger_gens_as_roots,
                           bool activate_scope,
                           SharedHeap::ScanningOption so,
                           bool only_strong_roots,
                           OopsInGenClosure* not_older_gens,
--- 391,401 ----
   public:
    static const bool StrongAndWeakRoots = false;
    static const bool StrongRootsOnly    = true;
  
!   void gen_process_roots(Generation::Type type,
                           bool younger_gens_as_roots,
                           bool activate_scope,
                           SharedHeap::ScanningOption so,
                           bool only_strong_roots,
                           OopsInGenClosure* not_older_gens,
*** 418,450 ****
    // allocated since the last call to save_marks in generations at or above
    // "level".  The "cur" closure is
    // applied to references in the generation at "level", and the "older"
    // closure to older generations.
  #define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix)    \
!   void oop_since_save_marks_iterate(int level,                          \
                                      OopClosureType* cur,                \
                                      OopClosureType* older);
  
    ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL)
  
  #undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL
  
!   // Returns "true" iff no allocations have occurred in any generation at
!   // "level" or above since the last
    // call to "save_marks".
!   bool no_allocs_since_save_marks(int level);
  
    // Returns true if an incremental collection is likely to fail.
    // We optionally consult the young gen, if asked to do so;
    // otherwise we base our answer on whether the previous incremental
    // collection attempt failed with no corrective action as of yet.
    bool incremental_collection_will_fail(bool consult_young) {
!     // Assumes a 2-generation system; the first disjunct remembers if an
!     // incremental collection failed, even when we thought (second disjunct)
!     // that it would not.
!     assert(heap()->collector_policy()->is_generation_policy(),
!            "the following definition may not be suitable for an n(>2)-generation system");
      return incremental_collection_failed() ||
             (consult_young && !_young_gen->collection_attempt_is_safe());
    }
  
    // If a generation bails out of an incremental collection,
--- 416,444 ----
    // allocated since the last call to save_marks in generations at or above
    // "level".  The "cur" closure is
    // applied to references in the generation at "level", and the "older"
    // closure to older generations.
  #define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix)    \
!   void oop_since_save_marks_iterate(Generation::Type start_gen,         \
                                      OopClosureType* cur,                \
                                      OopClosureType* older);
  
    ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL)
  
  #undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL
  
!   // Returns "true" iff no allocations have occurred since the last
    // call to "save_marks".
!   bool no_allocs_since_save_marks(bool include_young);
  
    // Returns true if an incremental collection is likely to fail.
    // We optionally consult the young gen, if asked to do so;
    // otherwise we base our answer on whether the previous incremental
    // collection attempt failed with no corrective action as of yet.
    bool incremental_collection_will_fail(bool consult_young) {
!     // The first disjunct remembers if an incremental collection failed, even
!     // when we thought (second disjunct) that it would not.
      return incremental_collection_failed() ||
             (consult_young && !_young_gen->collection_attempt_is_safe());
    }
  
    // If a generation bails out of an incremental collection,
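
Editor's note: no_allocs_since_save_marks() loses its numeric starting level;
with exactly two generations, a boolean saying whether the young generation
participates is enough. A minimal sketch of the assumed semantics with
stand-in types (the diff only shows the declaration, so the short-circuit
structure below is an assumption):

    #include <cstdio>

    struct Generation {
      bool allocated_since_save_marks;
      bool no_allocs_since_save_marks() const { return !allocated_since_save_marks; }
    };

    class HeapModel {
     public:
      HeapModel(Generation young, Generation old_gen)
          : _young_gen(young), _old_gen(old_gen) {}

      // was: bool no_allocs_since_save_marks(int level);
      bool no_allocs_since_save_marks(bool include_young) const {
        return (!include_young || _young_gen.no_allocs_since_save_marks()) &&
               _old_gen.no_allocs_since_save_marks();
      }

     private:
      Generation _young_gen;
      Generation _old_gen;
    };

    int main() {
      HeapModel heap({/*allocated_since_save_marks=*/true}, {false});
      std::printf("%d\n", heap.no_allocs_since_save_marks(true));   // 0: young allocated
      std::printf("%d\n", heap.no_allocs_since_save_marks(false));  // 1: old gen clean
      return 0;
    }
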
*** 480,493 ****
    // For use by mark-sweep.  As implemented, mark-sweep-compact is global
    // in an essential way: compaction is performed across generations, by
    // iterating over spaces.
    void prepare_for_compaction();
  
!   // Perform a full collection of the first max_level+1 generations.
    // This is the low level interface used by the public versions of
    // collect() and collect_locked(). Caller holds the Heap_lock on entry.
!   void collect_locked(GCCause::Cause cause, int max_level);
  
    // Returns success or failure.
    bool create_cms_collector();
  
    // In support of ExplicitGCInvokesConcurrent functionality
--- 474,487 ----
    // For use by mark-sweep.  As implemented, mark-sweep-compact is global
    // in an essential way: compaction is performed across generations, by
    // iterating over spaces.
    void prepare_for_compaction();
  
!   // Perform a full collection of the generations up to and including max_gen.
    // This is the low level interface used by the public versions of
    // collect() and collect_locked(). Caller holds the Heap_lock on entry.
!   void collect_locked(GCCause::Cause cause, Generation::Type max_gen);
  
    // Returns success or failure.
    bool create_cms_collector();
  
    // In support of ExplicitGCInvokesConcurrent functionality