src/share/vm/gc_implementation/g1/concurrentMark.hpp

rev 5732 : [mq]: comments2

*** 376,475 ****
    friend class G1CMRefProcTaskExecutor;
    friend class G1CMKeepAliveAndDrainClosure;
    friend class G1CMDrainMarkingStackClosure;

  protected:
!   ConcurrentMarkThread* _cmThread;   // the thread doing the work
!   G1CollectedHeap*      _g1h;        // the heap.
!   uint                  _parallel_marking_threads; // the number of marking
!                                                    // threads we're use
!   uint                  _max_parallel_marking_threads; // max number of marking
                                                          // threads we'll ever use

!   double                _sleep_factor; // how much we have to sleep, with
                                          // respect to the work we just did, to
                                          // meet the marking overhead goal
!   double                _marking_task_overhead; // marking target overhead for
                                                   // a single task

!   // same as the two above, but for the cleanup task
    double                _cleanup_sleep_factor;
    double                _cleanup_task_overhead;

    FreeRegionList        _cleanup_list;

    // Concurrent marking support structures
    CMBitMap                _markBitMap1;
    CMBitMap                _markBitMap2;
!   CMBitMapRO*             _prevMarkBitMap; // completed mark bitmap
!   CMBitMap*               _nextMarkBitMap; // under-construction mark bitmap

    BitMap                  _region_bm;
    BitMap                  _card_bm;

    // Heap bounds
    HeapWord*               _heap_start;
    HeapWord*               _heap_end;

!   // Root region tracking and claiming.
    CMRootRegions           _root_regions;

    // For gray objects
!   CMMarkStack             _markStack; // Grey objects behind global finger.
!   HeapWord* volatile      _finger;    // the global finger, region aligned,
                                        // always points to the end of the
                                        // last claimed region

!   // marking tasks
!   uint                    _max_worker_id;// maximum worker id
!   uint                    _active_tasks; // task num currently active
!   CMTask**                _tasks;        // task queue array (max_worker_id len)
!   CMTaskQueueSet*         _task_queues;  // task queue set
!   ParallelTaskTerminator  _terminator;   // for termination

!   // Two sync barriers that are used to synchronise tasks when an
    // overflow occurs. The algorithm is the following. All tasks enter
    // the first one to ensure that they have all stopped manipulating
!   // the global data structures. After they exit it, they re-initialise
!   // their data structures and task 0 re-initialises the global data
    // structures. Then, they enter the second sync barrier. This
    // ensure, that no task starts doing work before all data
!   // structures (local and global) have been re-initialised. When they
    // exit it, they are free to start working again.
    WorkGangBarrierSync     _first_overflow_barrier_sync;
    WorkGangBarrierSync     _second_overflow_barrier_sync;

!   // this is set by any task, when an overflow on the global data
!   // structures is detected.
    volatile bool           _has_overflown;
!   // true: marking is concurrent, false: we're in remark
    volatile bool           _concurrent;
!   // set at the end of a Full GC so that marking aborts
    volatile bool           _has_aborted;

!   // used when remark aborts due to an overflow to indicate that
    // another concurrent marking phase should start
    volatile bool           _restart_for_overflow;

    // This is true from the very start of concurrent marking until the
    // point when all the tasks complete their work. It is really used
    // to determine the points between the end of concurrent marking and
    // time of remark.
    volatile bool           _concurrent_marking_in_progress;

!   // verbose level
    CMVerboseLevel          _verbose_level;

!   // All of these times are in ms.
    NumberSeq _init_times;
    NumberSeq _remark_times;
    NumberSeq _remark_mark_times;
    NumberSeq _remark_weak_ref_times;
    NumberSeq _cleanup_times;
    double    _total_counting_time;
    double    _total_rs_scrub_time;
!   double*   _accum_task_vtime;   // accumulated task vtime

    FlexibleWorkGang*     _parallel_workers;

    ForceOverflowSettings _force_overflow_conc;
    ForceOverflowSettings _force_overflow_stw;
--- 376,475 ----
    friend class G1CMRefProcTaskExecutor;
    friend class G1CMKeepAliveAndDrainClosure;
    friend class G1CMDrainMarkingStackClosure;

  protected:
!   ConcurrentMarkThread* _cmThread;   // The thread doing the work
!   G1CollectedHeap*      _g1h;        // The heap
!   uint                  _parallel_marking_threads; // The number of marking
!                                                    // threads we're using
!   uint                  _max_parallel_marking_threads; // Max number of marking
                                                          // threads we'll ever use

!   double                _sleep_factor; // How much we have to sleep, with
                                          // respect to the work we just did, to
                                          // meet the marking overhead goal
!   double                _marking_task_overhead; // Marking target overhead for
                                                   // a single task

!   // Same as the two above, but for the cleanup task
    double                _cleanup_sleep_factor;
    double                _cleanup_task_overhead;

    FreeRegionList        _cleanup_list;

    // Concurrent marking support structures
    CMBitMap                _markBitMap1;
    CMBitMap                _markBitMap2;
!   CMBitMapRO*             _prevMarkBitMap; // Completed mark bitmap
!   CMBitMap*               _nextMarkBitMap; // Under-construction mark bitmap

    BitMap                  _region_bm;
    BitMap                  _card_bm;

    // Heap bounds
    HeapWord*               _heap_start;
    HeapWord*               _heap_end;

!   // Root region tracking and claiming
    CMRootRegions           _root_regions;

    // For gray objects
!   CMMarkStack             _markStack; // Grey objects behind global finger
!   HeapWord* volatile      _finger;    // The global finger, region aligned,
                                        // always points to the end of the
                                        // last claimed region

!   // Marking tasks
!   uint                    _max_worker_id;// Maximum worker id
!   uint                    _active_tasks; // Task num currently active
!   CMTask**                _tasks;        // Task queue array (max_worker_id len)
!   CMTaskQueueSet*         _task_queues;  // Task queue set
!   ParallelTaskTerminator  _terminator;   // For termination

!   // Two sync barriers that are used to synchronize tasks when an
    // overflow occurs. The algorithm is the following. All tasks enter
    // the first one to ensure that they have all stopped manipulating
!   // the global data structures. After they exit it, they re-initialize
!   // their data structures and task 0 re-initializes the global data
    // structures. Then, they enter the second sync barrier. This
    // ensure, that no task starts doing work before all data
!   // structures (local and global) have been re-initialized. When they
    // exit it, they are free to start working again.
    WorkGangBarrierSync     _first_overflow_barrier_sync;
    WorkGangBarrierSync     _second_overflow_barrier_sync;

!   // This is set by any task, when an overflow on the global data
!   // structures is detected
    volatile bool           _has_overflown;
!   // True: marking is concurrent, false: we're in remark
    volatile bool           _concurrent;
!   // Set at the end of a Full GC so that marking aborts
    volatile bool           _has_aborted;

!   // Used when remark aborts due to an overflow to indicate that
    // another concurrent marking phase should start
    volatile bool           _restart_for_overflow;

    // This is true from the very start of concurrent marking until the
    // point when all the tasks complete their work. It is really used
    // to determine the points between the end of concurrent marking and
    // time of remark.
    volatile bool           _concurrent_marking_in_progress;

!   // Verbose level
    CMVerboseLevel          _verbose_level;

!   // All of these times are in ms
    NumberSeq _init_times;
    NumberSeq _remark_times;
    NumberSeq _remark_mark_times;
    NumberSeq _remark_weak_ref_times;
    NumberSeq _cleanup_times;
    double    _total_counting_time;
    double    _total_rs_scrub_time;
!   double*   _accum_task_vtime;   // Accumulated task vtime

    FlexibleWorkGang*     _parallel_workers;

    ForceOverflowSettings _force_overflow_conc;
    ForceOverflowSettings _force_overflow_stw;
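
Editor's note: the overflow comment in the hunk above describes a two-barrier handshake. Below is a minimal, self-contained sketch of that pattern, assuming a toy generation-counting Barrier and placeholder reset_local_state()/reset_global_state() helpers invented for this example; it illustrates the protocol the comment describes, not HotSpot's actual WorkGangBarrierSync implementation.

#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

class Barrier {
  std::mutex              _m;
  std::condition_variable _cv;
  const unsigned          _count;
  unsigned                _waiting;
  unsigned                _generation;
 public:
  explicit Barrier(unsigned count) : _count(count), _waiting(0), _generation(0) {}
  void enter() {
    std::unique_lock<std::mutex> lock(_m);
    unsigned gen = _generation;
    if (++_waiting == _count) {
      _waiting = 0;
      _generation++;       // release everyone waiting on this generation
      _cv.notify_all();
    } else {
      _cv.wait(lock, [this, gen] { return gen != _generation; });
    }
  }
};

static const unsigned kWorkers = 4;          // assumed task count
Barrier first_overflow_barrier(kWorkers);
Barrier second_overflow_barrier(kWorkers);

void reset_local_state(unsigned /*worker_id*/) {}  // placeholder: per-task re-init
void reset_global_state() {}                       // placeholder: global re-init

void handle_overflow(unsigned worker_id) {
  // First rendezvous: every task has stopped touching global structures.
  first_overflow_barrier.enter();
  // Each task resets its own state; task 0 alone resets the global state.
  reset_local_state(worker_id);
  if (worker_id == 0) {
    reset_global_state();
  }
  // Second rendezvous: nobody resumes until all re-initialization is done.
  second_overflow_barrier.enter();
  // Safe to start working again.
}

int main() {
  std::vector<std::thread> tasks;
  for (unsigned i = 0; i < kWorkers; i++) {
    tasks.emplace_back(handle_overflow, i);
  }
  for (std::thread& t : tasks) t.join();
  return 0;
}

The second rendezvous is the load-bearing part: it keeps a fast task from resuming work while task 0 is still re-initializing the global structures.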
*** 485,512 ****
    // Resets all the marking data structures. Called when we have to restart
    // marking or when marking completes (via set_non_marking_state below).
    void reset_marking_state(bool clear_overflow = true);

    // We do this after we're done with marking so that the marking data
!   // structures are initialised to a sensible and predictable state.
    void set_non_marking_state();

    // Called to indicate how many threads are currently active.
    void set_concurrency(uint active_tasks);

    // It should be called to indicate which phase we're in (concurrent
    // mark or remark) and how many threads are currently active.
    void set_concurrency_and_phase(uint active_tasks, bool concurrent);

!   // prints all gathered CM-related statistics
    void print_stats();

    bool cleanup_list_is_empty() {
      return _cleanup_list.is_empty();
    }

!   // accessor methods
    uint parallel_marking_threads() const     { return _parallel_marking_threads; }
    uint max_parallel_marking_threads() const { return _max_parallel_marking_threads;}
    double sleep_factor()                     { return _sleep_factor; }
    double marking_task_overhead()            { return _marking_task_overhead;}
    double cleanup_sleep_factor()             { return _cleanup_sleep_factor; }
--- 485,512 ----
    // Resets all the marking data structures. Called when we have to restart
    // marking or when marking completes (via set_non_marking_state below).
    void reset_marking_state(bool clear_overflow = true);

    // We do this after we're done with marking so that the marking data
!   // structures are initialized to a sensible and predictable state.
    void set_non_marking_state();

    // Called to indicate how many threads are currently active.
    void set_concurrency(uint active_tasks);

    // It should be called to indicate which phase we're in (concurrent
    // mark or remark) and how many threads are currently active.
    void set_concurrency_and_phase(uint active_tasks, bool concurrent);

!   // Prints all gathered CM-related statistics
    void print_stats();

    bool cleanup_list_is_empty() {
      return _cleanup_list.is_empty();
    }

!   // Accessor methods
    uint parallel_marking_threads() const     { return _parallel_marking_threads; }
    uint max_parallel_marking_threads() const { return _max_parallel_marking_threads;}
    double sleep_factor()                     { return _sleep_factor; }
    double marking_task_overhead()            { return _marking_task_overhead;}
    double cleanup_sleep_factor()             { return _cleanup_sleep_factor; }
*** 540,550 ****
    // method. So, this way, each task will spend very little time in
    // claim_region() and is allowed to call the regular clock method
    // frequently.
    HeapRegion* claim_region(uint worker_id);

!   // It determines whether we've run out of regions to scan.
    bool out_of_regions() { return _finger == _heap_end; }

    // Returns the task with the given id
    CMTask* task(int id) {
      assert(0 <= id && id < (int) _active_tasks,
--- 540,550 ----
    // method. So, this way, each task will spend very little time in
    // claim_region() and is allowed to call the regular clock method
    // frequently.
    HeapRegion* claim_region(uint worker_id);

!   // It determines whether we've run out of regions to scan
    bool out_of_regions() { return _finger == _heap_end; }

    // Returns the task with the given id
    CMTask* task(int id) {
      assert(0 <= id && id < (int) _active_tasks,
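
Editor's note: out_of_regions() in the hunk above compares the global finger against the heap end, which only works because claim_region() advances the finger atomically, one region-aligned step at a time. A rough sketch of that claiming scheme, using std::atomic and an assumed fixed region size in place of HotSpot's Atomic wrappers and HeapRegion geometry (all names here are hypothetical):

#include <atomic>
#include <cstddef>
#include <cstdio>

typedef size_t HeapWordIdx;                  // stand-in for HeapWord*
static const size_t REGION_WORDS = 1024;     // assumed (fake) region size

std::atomic<HeapWordIdx> g_finger(0);        // global finger, region aligned
HeapWordIdx g_heap_end = 8 * REGION_WORDS;   // exclusive end of the heap

// Returns the start of the claimed region, or g_heap_end when none are left.
HeapWordIdx claim_region() {
  HeapWordIdx cur = g_finger.load();
  while (cur < g_heap_end) {
    HeapWordIdx next = cur + REGION_WORDS;
    // The CAS winner owns [cur, next); on failure, compare_exchange
    // reloads the current finger into cur and we retry from there.
    if (g_finger.compare_exchange_weak(cur, next)) {
      return cur;
    }
  }
  return g_heap_end;  // mirrors out_of_regions(): _finger == _heap_end
}

int main() {
  for (HeapWordIdx r = claim_region(); r != g_heap_end; r = claim_region()) {
    std::printf("claimed region at word %zu\n", r);
  }
  return 0;
}

Because every claim is a single CAS, workers spend almost no time in claim_region(), which is exactly the property the surrounding comment relies on.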
*** 814,824 ****
    }

    inline bool do_yield_check(uint worker_i = 0);
    inline bool should_yield();

!   // Called to abort the marking cycle after a Full GC takes palce.
    void abort();

    bool has_aborted()      { return _has_aborted; }

    // This prints the global/local fingers. It is used for debugging.
--- 814,824 ----
    }

    inline bool do_yield_check(uint worker_i = 0);
    inline bool should_yield();

!   // Called to abort the marking cycle after a Full GC takes place.
    void abort();

    bool has_aborted()      { return _has_aborted; }

    // This prints the global/local fingers. It is used for debugging.
*** 931,945 ****
    // contains the object to be marked/counted, which this routine looks up.
    inline bool par_mark_and_count(oop obj, uint worker_id);

    // Similar to the above routine but there are times when we cannot
    // safely calculate the size of obj due to races and we, therefore,
!   // pass the size in as a parameter. It is the caller's reponsibility
    // to ensure that the size passed in for obj is valid.
    inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);

!   // Unconditionally mark the given object, and unconditinally count
    // the object in the counting structures for worker id 0.
    // Should *not* be called from parallel code.
    inline bool mark_and_count(oop obj, HeapRegion* hr);

    // Similar to the above routine but we don't know the heap region that
--- 931,945 ----
    // contains the object to be marked/counted, which this routine looks up.
    inline bool par_mark_and_count(oop obj, uint worker_id);

    // Similar to the above routine but there are times when we cannot
    // safely calculate the size of obj due to races and we, therefore,
!   // pass the size in as a parameter. It is the caller's responsibility
    // to ensure that the size passed in for obj is valid.
    inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);

!   // Unconditionally mark the given object, and unconditionally count
    // the object in the counting structures for worker id 0.
    // Should *not* be called from parallel code.
    inline bool mark_and_count(oop obj, HeapRegion* hr);

    // Similar to the above routine but we don't know the heap region that
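
Editor's note: the par_mark_and_count() overloads in the hunk above pair an atomic mark with per-worker counting. The sketch below shows one plausible shape of that pattern under simplified assumptions (a flat mark bitmap indexed by object and a private live-word counter per worker); it is not G1's actual bitmap or counting-structure layout:

#include <atomic>
#include <cstddef>
#include <cstdint>

static const size_t MAX_WORKERS  = 8;     // assumed worker count
static const size_t BITMAP_WORDS = 1024;  // assumed bitmap size (64K marks)

std::atomic<uint64_t> g_mark_bits[BITMAP_WORDS];  // shared mark bitmap
uint64_t              g_live_words[MAX_WORKERS];  // per-worker live counts

// Atomically set the mark bit; true means this caller won the race to mark.
bool par_mark(size_t bit) {
  uint64_t mask = UINT64_C(1) << (bit % 64);
  uint64_t old  = g_mark_bits[bit / 64].fetch_or(mask);
  return (old & mask) == 0;
}

// Mark the object and, only if this caller marked it, charge its size to the
// worker's private counter, so the counters themselves need no atomics.
bool par_mark_and_count(size_t obj_bit, size_t word_size, size_t worker_id) {
  if (par_mark(obj_bit)) {
    g_live_words[worker_id] += word_size;
    return true;
  }
  return false;  // another worker marked it first; do not count it twice
}

Because only the fetch_or winner counts the object, each object's size is attributed exactly once, which is why the serial mark_and_count() variant above can simply funnel everything into worker id 0.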