src/share/vm/gc_implementation/parNew/parNewGeneration.hpp

rev 4773 : 8005849: JEP 167: Event-Based JVM Tracing
Reviewed-by: acorn, coleenp, sla
Contributed-by: Karen Kinnear <karen.kinnear@oracle.com>, Bengt Rutisson <bengt.rutisson@oracle.com>, Calvin Cheung <calvin.cheung@oracle.com>, Erik Gahlin <erik.gahlin@oracle.com>, Erik Helin <erik.helin@oracle.com>, Jesper Wilhelmsson <jesper.wilhelmsson@oracle.com>, Keith McGuigan <keith.mcguigan@oracle.com>, Mattias Tobiasson <mattias.tobiasson@oracle.com>, Markus Gronlund <markus.gronlund@oracle.com>, Mikael Auno <mikael.auno@oracle.com>, Nils Eliasson <nils.eliasson@oracle.com>, Nils Loodin <nils.loodin@oracle.com>, Rickard Backman <rickard.backman@oracle.com>, Staffan Larsen <staffan.larsen@oracle.com>, Stefan Karlsson <stefan.karlsson@oracle.com>, Yekaterina Kantserova <yekaterina.kantserova@oracle.com>

*** 23,33 ****
--- 23,35 ----
   */
  
  #ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
  #define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
  
+ #include "gc_implementation/shared/gcTrace.hpp"
  #include "gc_implementation/shared/parGCAllocBuffer.hpp"
+ #include "gc_implementation/shared/copyFailedInfo.hpp"
  #include "memory/defNewGeneration.hpp"
  #include "utilities/taskqueue.hpp"
  
  class ChunkArray;
  class ParScanWithoutBarrierClosure;
*** 103,113 ****
    size_t _overflow_refills;
    size_t _overflow_refill_objs;
  #endif // TASKQUEUE_STATS
  
    // Stats for promotion failure
!   size_t _promotion_failure_size;
  
    // Timing numbers.
    double _start;
    double _start_strong_roots;
    double _strong_roots_time;
--- 105,115 ----
    size_t _overflow_refills;
    size_t _overflow_refill_objs;
  #endif // TASKQUEUE_STATS
  
    // Stats for promotion failure
!   PromotionFailedInfo _promotion_failed_info;
  
    // Timing numbers.
    double _start;
    double _start_strong_roots;
    double _strong_roots_time;
*** 178,194 ****
  
    // Undo the most recent allocation ("obj", of "word_sz").
    void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz);
  
    // Promotion failure stats
!   size_t promotion_failure_size() { return promotion_failure_size(); }
!   void log_promotion_failure(size_t sz) {
!     if (_promotion_failure_size == 0) {
!       _promotion_failure_size = sz;
      }
    }
!   void print_and_clear_promotion_failure_size();
  
  #if TASKQUEUE_STATS
    TaskQueueStats & taskqueue_stats() const { return _work_queue->stats; }
    size_t term_attempts() const { return _term_attempts; }
--- 180,199 ----
  
    // Undo the most recent allocation ("obj", of "word_sz").
    void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz);
  
    // Promotion failure stats
!   void register_promotion_failure(size_t sz) {
!     _promotion_failed_info.register_copy_failure(sz);
    }
+   PromotionFailedInfo& promotion_failed_info() {
+     return _promotion_failed_info;
    }
!   bool promotion_failed() {
!     return _promotion_failed_info.has_failed();
!   }
!   void print_promotion_failure_size();
  
  #if TASKQUEUE_STATS
    TaskQueueStats & taskqueue_stats() const { return _work_queue->stats; }
    size_t term_attempts() const { return _term_attempts; }
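
The bare size_t that recorded only the first failed promotion, together with the self-recursive promotion_failure_size() accessor that backed it, is replaced by the PromotionFailedInfo member from the previous hunk plus the register_promotion_failure(), promotion_failed_info() and promotion_failed() accessors. The real PromotionFailedInfo comes from the copyFailedInfo.hpp include added at the top of the file and is not shown in this webrev; the stand-in below is a minimal sketch, assuming it records at least the first failure size and a failure count, and it uses only the two calls visible in this diff (register_copy_failure, has_failed). All *Sketch names are invented for illustration.

// Minimal, self-contained sketch -- not HotSpot code. PromotionFailedInfoSketch
// stands in for the real PromotionFailedInfo from copyFailedInfo.hpp, and
// ParScanThreadStateSketch mirrors only the three accessors added in this hunk.
#include <assert.h>
#include <stddef.h>

struct PromotionFailedInfoSketch {
  size_t _first_size;    // what the old size_t _promotion_failure_size held
  size_t _failed_count;  // assumed: number of failed copies, drives has_failed()

  PromotionFailedInfoSketch() : _first_size(0), _failed_count(0) {}

  void register_copy_failure(size_t word_sz) {
    if (_failed_count == 0) {
      _first_size = word_sz;   // old behaviour: remember the first failure only
    }
    _failed_count++;           // assumed new behaviour: count every failure
  }
  bool has_failed() const { return _failed_count > 0; }
};

class ParScanThreadStateSketch {
  PromotionFailedInfoSketch _promotion_failed_info;
 public:
  // The three accessors from this hunk, forwarding to the info object instead
  // of writing a bare size_t as log_promotion_failure() used to do.
  void register_promotion_failure(size_t sz) {
    _promotion_failed_info.register_copy_failure(sz);
  }
  PromotionFailedInfoSketch& promotion_failed_info() {
    return _promotion_failed_info;
  }
  bool promotion_failed() {
    return _promotion_failed_info.has_failed();
  }
};

int main() {
  ParScanThreadStateSketch state;
  assert(!state.promotion_failed());
  state.register_promotion_failure(42);   // a 42-word object could not be promoted
  state.register_promotion_failure(8);    // later failures are recorded as well
  assert(state.promotion_failed());
  assert(state.promotion_failed_info()._first_size == 42);
  return 0;
}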
*** 335,344 ****
--- 340,351 ----
  
    // Preserve the mark of "obj", if necessary, in preparation for its mark
    // word being overwritten with a self-forwarding-pointer.
    void preserve_mark_if_necessary(oop obj, markOop m);
  
+   void handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer);
+ 
   protected:
  
    bool _survivor_overflow;
  
    bool avoid_promotion_undo() { return _avoid_promotion_undo; }
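
handle_promotion_failed() is a new hook on ParNewGeneration; its body lives in parNewGeneration.cpp and is not part of this header diff. The sketch below only illustrates, under stated assumptions, the kind of wiring such a hook makes possible: folding per-thread promotion-failure records into the GC tracer so a promotion-failed tracing event can be emitted. Every type and member name in it is a stand-in except the hook's own name; none of it is taken from the actual implementation.

// Hypothetical sketch only -- not the HotSpot implementation. Shows per-thread
// failure records being reported through a tracer after a failed scavenge.
#include <stddef.h>
#include <stdio.h>
#include <vector>

struct FailedInfoSketch {
  bool   failed;
  size_t first_size;
  FailedInfoSketch() : failed(false), first_size(0) {}
};

struct ParNewTracerSketch {
  // Stand-in for whatever event-reporting call the real ParNewTracer offers.
  void report_promotion_failed(const FailedInfoSketch& info) {
    printf("promotion failed, first object size = %lu words\n",
           (unsigned long) info.first_size);
  }
};

struct ThreadStateSetSketch {
  std::vector<FailedInfoSketch> per_thread;
  // Walk the worker states and hand each recorded failure to the tracer.
  void trace_promotion_failed(ParNewTracerSketch& tracer) {
    for (size_t i = 0; i < per_thread.size(); i++) {
      if (per_thread[i].failed) {
        tracer.report_promotion_failed(per_thread[i]);
      }
    }
  }
};

// Stand-in for the new hook declared in this hunk. The real method would also
// do the clean-up a failed scavenge needs (for example restoring forwarding
// pointers and marking the collection as failed), not just the reporting.
void handle_promotion_failed_sketch(ThreadStateSetSketch& thread_state_set,
                                    ParNewTracerSketch& gc_tracer) {
  thread_state_set.trace_promotion_failed(gc_tracer);
}

int main() {
  ThreadStateSetSketch set;
  set.per_thread.resize(2);
  set.per_thread[1].failed = true;       // worker 1 failed to promote an object
  set.per_thread[1].first_size = 16;
  ParNewTracerSketch tracer;
  handle_promotion_failed_sketch(set, tracer);
  return 0;
}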