49
// Construct the default G1 policy.  Only value initialization happens here;
// the heap and the collection set are attached later in init(), so _g1 and
// _collection_set deliberately start out NULL and the policy must not be
// used before init() has run.
G1DefaultPolicy::G1DefaultPolicy() :
  _predictor(G1ConfidencePercent / 100.0),            // prediction confidence as a fraction
  _analytics(new G1Analytics(&_predictor)),           // historical GC measurements feeding predictions
  _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),  // pause goals, converted ms -> s
  _ihop_control(create_ihop_control(&_predictor)),    // freed in the destructor
  _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 3)),
  _young_list_fixed_length(0),                        // only used when the young list length is not adaptive
  _short_lived_surv_rate_group(new SurvRateGroup()),
  _survivor_surv_rate_group(new SurvRateGroup()),
  _reserve_factor((double) G1ReservePercent / 100.0),
  _reserve_regions(0),
  _rs_lengths_prediction(0),
  _bytes_allocated_in_old_since_last_gc(0),
  _initial_mark_to_mixed(),
  _collection_set(NULL),                              // wired up in init()
  _g1(NULL),                                          // wired up in init()
  _phase_times(new G1GCPhaseTimes(ParallelGCThreads)),
  _tenuring_threshold(MaxTenuringThreshold),
  _max_survivor_regions(0),
  _survivors_age_table(true) { }
70
G1DefaultPolicy::~G1DefaultPolicy() {
  // Only _ihop_control is released here.  NOTE(review): _analytics,
  // _mmu_tracker, _policy_counters, both SurvRateGroups and _phase_times
  // are also heap-allocated in the constructor but not deleted --
  // presumably the policy lives for the lifetime of the VM; confirm.
  delete _ihop_control;
}
74
// Forwarder to the heap's collector state; valid only after init() has set _g1.
G1CollectorState* G1DefaultPolicy::collector_state() const { return _g1->collector_state(); }
76
77 void G1DefaultPolicy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
78 _g1 = g1h;
79 _collection_set = collection_set;
80
81 assert(Heap_lock->owned_by_self(), "Locking discipline.");
82
83 if (!adaptive_young_list_length()) {
84 _young_list_fixed_length = _young_gen_sizer.min_desired_young_length();
85 }
86 _young_gen_sizer.adjust_max_new_size(_g1->max_regions());
87
88 _free_regions_at_end_of_collection = _g1->num_free_regions();
89
401
// Refresh the remembered-set lengths prediction from the analytics module's
// current estimate, subject to the filtering done by the size_t overload.
void G1DefaultPolicy::update_rs_lengths_prediction() {
  update_rs_lengths_prediction(_analytics->predict_rs_lengths());
}
405
406 void G1DefaultPolicy::update_rs_lengths_prediction(size_t prediction) {
407 if (collector_state()->gcs_are_young() && adaptive_young_list_length()) {
408 _rs_lengths_prediction = prediction;
409 }
410 }
411
// Mark the beginning of a full collection pause.  The timestamp taken here
// is paired with record_full_collection_end() to compute the pause duration.
void G1DefaultPolicy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  collector_state()->set_full_collection(true);
}
417
418 void G1DefaultPolicy::record_full_collection_end() {
419 // Consider this like a collection pause for the purposes of allocation
420 // since last pause.
421 double end_sec = os::elapsedTime();
422 double full_gc_time_sec = end_sec - _full_collection_start_sec;
423 double full_gc_time_ms = full_gc_time_sec * 1000.0;
424
425 _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
426
427 collector_state()->set_full_collection(false);
428
429 // "Nuke" the heuristics that control the young/mixed GC
430 // transitions and make sure we start with young GCs after the Full GC.
431 collector_state()->set_gcs_are_young(true);
432 collector_state()->set_last_young_gc(false);
433 collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
434 collector_state()->set_during_initial_mark_pause(false);
435 collector_state()->set_in_marking_window(false);
436 collector_state()->set_in_marking_window_im(false);
437
438 _short_lived_surv_rate_group->start_adding_regions();
439 // also call this on any additional surv rate groups
440
|
49
// Construct the default G1 policy.  Only value initialization happens here;
// the heap and the collection set are attached later in init(), so _g1 and
// _collection_set deliberately start out NULL and the policy must not be
// used before init() has run.
G1DefaultPolicy::G1DefaultPolicy() :
  _predictor(G1ConfidencePercent / 100.0),            // prediction confidence as a fraction
  _analytics(new G1Analytics(&_predictor)),           // historical GC measurements feeding predictions
  _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),  // pause goals, converted ms -> s
  _ihop_control(create_ihop_control(&_predictor)),    // freed in the destructor
  _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 3)),
  _young_list_fixed_length(0),                        // only used when the young list length is not adaptive
  _short_lived_surv_rate_group(new SurvRateGroup()),
  _survivor_surv_rate_group(new SurvRateGroup()),
  _reserve_factor((double) G1ReservePercent / 100.0),
  _reserve_regions(0),
  _rs_lengths_prediction(0),
  _bytes_allocated_in_old_since_last_gc(0),
  _initial_mark_to_mixed(),
  _collection_set(NULL),                              // wired up in init()
  _g1(NULL),                                          // wired up in init()
  _phase_times(new G1GCPhaseTimes(ParallelGCThreads)),
  _tenuring_threshold(MaxTenuringThreshold),
  _max_survivor_regions(0),
  _survivors_age_table(true),
  // Seed "end of last full GC" with the current time so the value is
  // meaningful even before the first full collection has happened.
  _full_collection_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC) { }
71
G1DefaultPolicy::~G1DefaultPolicy() {
  // Only _ihop_control is released here.  NOTE(review): _analytics,
  // _mmu_tracker, _policy_counters, both SurvRateGroups and _phase_times
  // are also heap-allocated in the constructor but not deleted --
  // presumably the policy lives for the lifetime of the VM; confirm.
  delete _ihop_control;
}
75
// Forwarder to the heap's collector state; valid only after init() has set _g1.
G1CollectorState* G1DefaultPolicy::collector_state() const { return _g1->collector_state(); }
77
78 void G1DefaultPolicy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
79 _g1 = g1h;
80 _collection_set = collection_set;
81
82 assert(Heap_lock->owned_by_self(), "Locking discipline.");
83
84 if (!adaptive_young_list_length()) {
85 _young_list_fixed_length = _young_gen_sizer.min_desired_young_length();
86 }
87 _young_gen_sizer.adjust_max_new_size(_g1->max_regions());
88
89 _free_regions_at_end_of_collection = _g1->num_free_regions();
90
402
// Refresh the remembered-set lengths prediction from the analytics module's
// current estimate, subject to the filtering done by the size_t overload.
void G1DefaultPolicy::update_rs_lengths_prediction() {
  update_rs_lengths_prediction(_analytics->predict_rs_lengths());
}
406
407 void G1DefaultPolicy::update_rs_lengths_prediction(size_t prediction) {
408 if (collector_state()->gcs_are_young() && adaptive_young_list_length()) {
409 _rs_lengths_prediction = prediction;
410 }
411 }
412
// Mark the beginning of a full collection pause.  The timestamp taken here
// is paired with record_full_collection_end() to compute the pause duration.
void G1DefaultPolicy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  collector_state()->set_full_collection(true);
}
418
419 void G1DefaultPolicy::record_full_collection_end() {
420 // Consider this like a collection pause for the purposes of allocation
421 // since last pause.
422 _full_collection_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
423 double end_sec = os::elapsedTime();
424 double full_gc_time_sec = end_sec - _full_collection_start_sec;
425 double full_gc_time_ms = full_gc_time_sec * 1000.0;
426
427 _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
428
429 collector_state()->set_full_collection(false);
430
431 // "Nuke" the heuristics that control the young/mixed GC
432 // transitions and make sure we start with young GCs after the Full GC.
433 collector_state()->set_gcs_are_young(true);
434 collector_state()->set_last_young_gc(false);
435 collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
436 collector_state()->set_during_initial_mark_pause(false);
437 collector_state()->set_in_marking_window(false);
438 collector_state()->set_in_marking_window_im(false);
439
440 _short_lived_surv_rate_group->start_adding_regions();
441 // also call this on any additional surv rate groups
442
|