// NOTE(review): "old" pane of a side-by-side (webrev) diff; the leading
// 4xx numbers are the diff tool's line numbers, not C++ tokens.

// Timing statistics accumulated across GC phases (units not visible in
// this chunk — presumably seconds; confirm against print_stats()).
461 NumberSeq _remark_mark_times;
462 NumberSeq _remark_weak_ref_times;
463 NumberSeq _cleanup_times;
464 double _total_counting_time;
465 double _total_rs_scrub_time;
466
// Presumably one slot per marking task — confirm allocation site in .cpp.
467 double* _accum_task_vtime; // accumulated task vtime
468
// Gang of worker threads used for the parallel marking work.
469 FlexibleWorkGang* _parallel_workers;
470
// Debug/test knobs that force marking-stack overflow during the
// concurrent and stop-the-world (STW) phases, respectively (by name —
// confirm where they are consulted).
471 ForceOverflowSettings _force_overflow_conc;
472 ForceOverflowSettings _force_overflow_stw;
473
// Processes discovered weak references; when clear_all_soft_refs is
// true, soft references are cleared as well — confirm in the .cpp.
474 void weakRefsWork(bool clear_all_soft_refs);
475
// Exchanges the roles of the two marking bitmaps (definition elsewhere).
476 void swapMarkBitMaps();
477
478 // It resets the global marking data structures, as well as the
479 // task local ones; should be called during initial mark.
480 void reset();
// clear_overflow: when true (default) the overflow flag is also reset.
481 // It resets all the marking data structures.
482 void clear_marking_state(bool clear_overflow = true);
483
484 // It should be called to indicate which phase we're in (concurrent
485 // mark or remark) and how many threads are currently active.
486 void set_phase(uint active_tasks, bool concurrent);
487 // We do this after we're done with marking so that the marking data
488 // structures are initialised to a sensible and predictable state.
489 void set_non_marking_state();
490
491 // prints all gathered CM-related statistics
492 void print_stats();
493
// Emptiness check on the list of regions awaiting cleanup.
494 bool cleanup_list_is_empty() {
495 return _cleanup_list.is_empty();
496 }
497
498 // accessor methods
499 uint parallel_marking_threads() { return _parallel_marking_threads; }
500 uint max_parallel_marking_threads() { return _max_parallel_marking_threads;}
501 double sleep_factor() { return _sleep_factor; }
502 double marking_task_overhead() { return _marking_task_overhead;}
503 double cleanup_sleep_factor() { return _cleanup_sleep_factor; }
504 double cleanup_task_overhead() { return _cleanup_task_overhead;}
505
// Global finger: current position of the marking claim pointer in the heap.
// (Semantics inferred from the name — confirm against the .cpp.)
506 HeapWord* finger() { return _finger; }
507 bool concurrent() { return _concurrent; }
508 uint active_tasks() { return _active_tasks; }
509 ParallelTaskTerminator* terminator() { return &_terminator; }
510
|
// NOTE(review): "new" pane of a side-by-side (webrev) diff; the leading
// 4xx numbers are the diff tool's line numbers, not C++ tokens. The change
// vs. the old pane renames clear_marking_state -> reset_marking_state and
// reorders/clarifies the related comments.

// Timing statistics accumulated across GC phases (units not visible in
// this chunk — presumably seconds; confirm against print_stats()).
461 NumberSeq _remark_mark_times;
462 NumberSeq _remark_weak_ref_times;
463 NumberSeq _cleanup_times;
464 double _total_counting_time;
465 double _total_rs_scrub_time;
466
// Presumably one slot per marking task — confirm allocation site in .cpp.
467 double* _accum_task_vtime; // accumulated task vtime
468
// Gang of worker threads used for the parallel marking work.
469 FlexibleWorkGang* _parallel_workers;
470
// Debug/test knobs that force marking-stack overflow during the
// concurrent and stop-the-world (STW) phases, respectively (by name —
// confirm where they are consulted).
471 ForceOverflowSettings _force_overflow_conc;
472 ForceOverflowSettings _force_overflow_stw;
473
// Processes discovered weak references; when clear_all_soft_refs is
// true, soft references are cleared as well — confirm in the .cpp.
474 void weakRefsWork(bool clear_all_soft_refs);
475
// Exchanges the roles of the two marking bitmaps (definition elsewhere).
476 void swapMarkBitMaps();
477
478 // It resets the global marking data structures, as well as the
479 // task local ones; should be called during initial mark.
480 void reset();
481
482 // Resets all the marking data structures. Called when we have to restart
483 // marking or when marking completes (via set_non_marking_state below).
// clear_overflow: when true (default) the overflow flag is also reset.
484 void reset_marking_state(bool clear_overflow = true);
485
486 // We do this after we're done with marking so that the marking data
487 // structures are initialised to a sensible and predictable state.
488 void set_non_marking_state();
489
490 // It should be called to indicate which phase we're in (concurrent
491 // mark or remark) and how many threads are currently active.
492 void set_phase(uint active_tasks, bool concurrent);
493
494 // prints all gathered CM-related statistics
495 void print_stats();
496
// Emptiness check on the list of regions awaiting cleanup.
497 bool cleanup_list_is_empty() {
498 return _cleanup_list.is_empty();
499 }
500
501 // accessor methods
502 uint parallel_marking_threads() { return _parallel_marking_threads; }
503 uint max_parallel_marking_threads() { return _max_parallel_marking_threads;}
504 double sleep_factor() { return _sleep_factor; }
505 double marking_task_overhead() { return _marking_task_overhead;}
506 double cleanup_sleep_factor() { return _cleanup_sleep_factor; }
507 double cleanup_task_overhead() { return _cleanup_task_overhead;}
508
// Global finger: current position of the marking claim pointer in the heap.
// (Semantics inferred from the name — confirm against the .cpp.)
509 HeapWord* finger() { return _finger; }
510 bool concurrent() { return _concurrent; }
511 uint active_tasks() { return _active_tasks; }
512 ParallelTaskTerminator* terminator() { return &_terminator; }
513
|