337 void scan_finished();
338
339 // If CM threads are still scanning root regions, wait until they
340 // are done. Return true if we had to wait, false otherwise.
341 bool wait_until_scan_finished();
342 };
343
344 class ConcurrentMarkThread;
345
346 class ConcurrentMark: public CHeapObj<mtGC> {
347 friend class ConcurrentMarkThread;
348 friend class CMTask;
349 friend class CMBitMapClosure;
350 friend class CMGlobalObjectClosure;
351 friend class CMRemarkTask;
352 friend class CMConcurrentMarkingTask;
353 friend class G1ParNoteEndTask;
354 friend class CalcLiveObjectsClosure;
355 friend class G1CMRefProcTaskProxy;
356 friend class G1CMRefProcTaskExecutor;
357 friend class G1CMParKeepAliveAndDrainClosure;
358 friend class G1CMParDrainMarkingStackClosure;
359
360 protected:
361 ConcurrentMarkThread* _cmThread; // the thread doing the work
362 G1CollectedHeap* _g1h; // the heap.
363 uint _parallel_marking_threads; // the number of marking
364 // threads we'll use
365 uint _max_parallel_marking_threads; // max number of marking
366 // threads we'll ever use
367 double _sleep_factor; // how much we have to sleep, with
368 // respect to the work we just did, to
369 // meet the marking overhead goal
370 double _marking_task_overhead; // marking target overhead for
371 // a single task
372
373 // same as the two above, but for the cleanup task
374 double _cleanup_sleep_factor;
375 double _cleanup_task_overhead;
376
377 FreeRegionList _cleanup_list;
378
453
454 ForceOverflowSettings _force_overflow_conc;
455 ForceOverflowSettings _force_overflow_stw;
456
457 void weakRefsWork(bool clear_all_soft_refs);
458
459 void swapMarkBitMaps();
460
461 // It resets the global marking data structures, as well as the
462 // task local ones; should be called during initial mark.
463 void reset();
464
465 // Resets all the marking data structures. Called when we have to restart
466 // marking or when marking completes (via set_non_marking_state below).
467 void reset_marking_state(bool clear_overflow = true);
468
469 // We do this after we're done with marking so that the marking data
470 // structures are initialised to a sensible and predictable state.
471 void set_non_marking_state();
472
473 // It should be called to indicate which phase we're in (concurrent
474 // mark or remark) and how many threads are currently active.
475 void set_phase(uint active_tasks, bool concurrent);
476
477 // prints all gathered CM-related statistics
478 void print_stats();
479
480 bool cleanup_list_is_empty() { // true iff no regions are currently queued on _cleanup_list
481 return _cleanup_list.is_empty();
482 }
483
484 // accessor methods
485 uint parallel_marking_threads() const { return _parallel_marking_threads; }
486 uint max_parallel_marking_threads() const { return _max_parallel_marking_threads;}
487 double sleep_factor() { return _sleep_factor; }
488 double marking_task_overhead() { return _marking_task_overhead;}
489 double cleanup_sleep_factor() { return _cleanup_sleep_factor; }
490 double cleanup_task_overhead() { return _cleanup_task_overhead;}
491
492 bool use_parallel_marking_threads() const {
493 assert(parallel_marking_threads() <=
494 max_parallel_marking_threads(), "sanity");
495 assert((_parallel_workers == NULL && parallel_marking_threads() == 0) ||
1100 }
1101 // this is supposed to be called regularly during a marking step as
1102 // it checks a bunch of conditions that might cause the marking step
1103 // to abort
1104 void regular_clock_call();
1105 bool concurrent() { return _concurrent; } // whether this marking step runs in the concurrent phase (vs. STW remark) -- set via set_concurrent()
1106
1107 public:
1108 // It resets the task; it should be called right at the beginning of
1109 // a marking phase.
1110 void reset(CMBitMap* _nextMarkBitMap);
1111 // it clears all the fields that correspond to a claimed region.
1112 void clear_region_fields();
1113
1114 void set_concurrent(bool concurrent) { _concurrent = concurrent; } // records whether the upcoming marking step is concurrent; read back by concurrent()
1115
1116 // The main method of this class which performs a marking step
1117 // trying not to exceed the given duration. However, it might exit
1118 // prematurely, according to some conditions (i.e. SATB buffers are
1119 // available for processing).
1120 void do_marking_step(double target_ms, bool do_stealing, bool do_termination);
1121
1122 // These two calls start and stop the timer
1123 void record_start_time() { // snapshot the start timestamp in ms; record_end_time() converts it to a duration
1124 _elapsed_time_ms = os::elapsedTime() * 1000.0;
1125 }
1126 void record_end_time() { // turn the stored start timestamp into the elapsed time in ms
1127 _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
1128 }
1129
1130 // returns the task ID
1131 int task_id() { return _task_id; }
1132
1133 // From TerminatorTerminator. It determines whether this task should
1134 // exit the termination protocol after it's entered it.
1135 virtual bool should_exit_termination();
1136
1137 // Resets the local region fields after a task has finished scanning a
1138 // region; or when they have become stale as a result of the region
1139 // being evacuated.
1140 void giveup_current_region();
|
337 void scan_finished();
338
339 // If CM threads are still scanning root regions, wait until they
340 // are done. Return true if we had to wait, false otherwise.
341 bool wait_until_scan_finished();
342 };
343
344 class ConcurrentMarkThread;
345
346 class ConcurrentMark: public CHeapObj<mtGC> {
347 friend class ConcurrentMarkThread;
348 friend class CMTask;
349 friend class CMBitMapClosure;
350 friend class CMGlobalObjectClosure;
351 friend class CMRemarkTask;
352 friend class CMConcurrentMarkingTask;
353 friend class G1ParNoteEndTask;
354 friend class CalcLiveObjectsClosure;
355 friend class G1CMRefProcTaskProxy;
356 friend class G1CMRefProcTaskExecutor;
357 friend class G1CMKeepAliveAndDrainClosure;
358 friend class G1CMDrainMarkingStackClosure;
359
360 protected:
361 ConcurrentMarkThread* _cmThread; // the thread doing the work
362 G1CollectedHeap* _g1h; // the heap.
363 uint _parallel_marking_threads; // the number of marking
364 // threads we'll use
365 uint _max_parallel_marking_threads; // max number of marking
366 // threads we'll ever use
367 double _sleep_factor; // how much we have to sleep, with
368 // respect to the work we just did, to
369 // meet the marking overhead goal
370 double _marking_task_overhead; // marking target overhead for
371 // a single task
372
373 // same as the two above, but for the cleanup task
374 double _cleanup_sleep_factor;
375 double _cleanup_task_overhead;
376
377 FreeRegionList _cleanup_list;
378
453
454 ForceOverflowSettings _force_overflow_conc;
455 ForceOverflowSettings _force_overflow_stw;
456
457 void weakRefsWork(bool clear_all_soft_refs);
458
459 void swapMarkBitMaps();
460
461 // It resets the global marking data structures, as well as the
462 // task local ones; should be called during initial mark.
463 void reset();
464
465 // Resets all the marking data structures. Called when we have to restart
466 // marking or when marking completes (via set_non_marking_state below).
467 void reset_marking_state(bool clear_overflow = true);
468
469 // We do this after we're done with marking so that the marking data
470 // structures are initialised to a sensible and predictable state.
471 void set_non_marking_state();
472
473 // Called to indicate how many threads are currently active.
474 void set_concurrency(uint active_tasks);
475
476 // It should be called to indicate which phase we're in (concurrent
477 // mark or remark) and how many threads are currently active.
478 void set_concurrency_and_phase(uint active_tasks, bool concurrent);
479
480 // prints all gathered CM-related statistics
481 void print_stats();
482
483 bool cleanup_list_is_empty() { // true iff no regions are currently queued on _cleanup_list
484 return _cleanup_list.is_empty();
485 }
486
487 // accessor methods
488 uint parallel_marking_threads() const { return _parallel_marking_threads; }
489 uint max_parallel_marking_threads() const { return _max_parallel_marking_threads;}
490 double sleep_factor() { return _sleep_factor; }
491 double marking_task_overhead() { return _marking_task_overhead;}
492 double cleanup_sleep_factor() { return _cleanup_sleep_factor; }
493 double cleanup_task_overhead() { return _cleanup_task_overhead;}
494
495 bool use_parallel_marking_threads() const {
496 assert(parallel_marking_threads() <=
497 max_parallel_marking_threads(), "sanity");
498 assert((_parallel_workers == NULL && parallel_marking_threads() == 0) ||
1103 }
1104 // this is supposed to be called regularly during a marking step as
1105 // it checks a bunch of conditions that might cause the marking step
1106 // to abort
1107 void regular_clock_call();
1108 bool concurrent() { return _concurrent; } // whether this marking step runs in the concurrent phase (vs. STW remark) -- set via set_concurrent()
1109
1110 public:
1111 // It resets the task; it should be called right at the beginning of
1112 // a marking phase.
1113 void reset(CMBitMap* _nextMarkBitMap);
1114 // it clears all the fields that correspond to a claimed region.
1115 void clear_region_fields();
1116
1117 void set_concurrent(bool concurrent) { _concurrent = concurrent; } // records whether the upcoming marking step is concurrent; read back by concurrent()
1118
1119 // The main method of this class which performs a marking step
1120 // trying not to exceed the given duration. However, it might exit
1121 // prematurely, according to some conditions (i.e. SATB buffers are
1122 // available for processing).
1123 void do_marking_step(double target_ms,
1124 bool do_termination,
1125 bool is_serial);
1126
1127 // These two calls start and stop the timer
1128 void record_start_time() { // snapshot the start timestamp in ms; record_end_time() converts it to a duration
1129 _elapsed_time_ms = os::elapsedTime() * 1000.0;
1130 }
1131 void record_end_time() { // turn the stored start timestamp into the elapsed time in ms
1132 _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
1133 }
1134
1135 // returns the task ID
1136 int task_id() { return _task_id; }
1137
1138 // From TerminatorTerminator. It determines whether this task should
1139 // exit the termination protocol after it's entered it.
1140 virtual bool should_exit_termination();
1141
1142 // Resets the local region fields after a task has finished scanning a
1143 // region; or when they have become stale as a result of the region
1144 // being evacuated.
1145 void giveup_current_region();
|