// Number of regions held back as a reserve (exact sizing is computed
// elsewhere -- see the code that assigns this field).
uint _reserve_regions;

// Young generation sizing policy.
G1YoungGenSizer* _young_gen_sizer;

// Number of free regions observed at the end of the last collection.
uint _free_regions_at_end_of_collection;

// Most recently sampled remembered set length.
size_t _rs_length;

// Predicted remembered set length for the upcoming collection.
size_t _rs_length_prediction;

// Card statistics: pending cards at the start of the current GC and at the
// end of the previous one, plus totals refined by the mutator and by the
// concurrent refinement threads.
size_t _pending_cards_at_gc_start;
size_t _pending_cards_at_prev_gc_end;
size_t _total_mutator_refined_cards;
size_t _total_concurrent_refined_cards;
// Accumulated time spent in concurrent refinement.
Tickspan _total_concurrent_refinement_time;

// The amount of allocated bytes in old gen during the last mutator and the following
// young GC phase.
size_t _bytes_allocated_in_old_since_last_gc;

// Tracks the time from initial mark to the following mixed GC.
G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
115 bool should_update_surv_rate_group_predictors() {
116 return collector_state()->in_young_only_phase() && !collector_state()->mark_or_rebuild_in_progress();
117 }

// Time spent processing logged cards (units: presumably ms -- confirm
// against the implementation).
double logged_cards_processing_time() const;
public:
// Read-only access to the predictor instance.
const G1Predictions& predictor() const { return _predictor; }
122 const G1Analytics* analytics() const { return const_cast<const G1Analytics*>(_analytics); }

// Mutable access to the remembered set tracking policy.
G1RemSetTrackingPolicy* remset_tracker() { return &_remset_tracker; }

// Add the given number of bytes to the total number of allocated bytes in the old gen.
void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }

// Tag the given region as eden and install the eden survivor rate group
// on it.
void set_region_eden(HeapRegion* hr) {
  hr->set_eden();
  hr->install_surv_rate_group(_eden_surv_rate_group);
}
// Record the start and end of an evacuation pause.
void record_collection_pause_start(double start_time_sec);
virtual void record_collection_pause_end(double pause_time_ms);

// Record the start and end of a full collection.
void record_full_collection_start();
virtual void record_full_collection_end();

// Must currently be called while the world is stopped.
void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);

// Record start and end of remark.
void record_concurrent_mark_remark_start();
void record_concurrent_mark_remark_end();

// Record start, end, and completion of cleanup.
void record_concurrent_mark_cleanup_start();
void record_concurrent_mark_cleanup_end();

// Print per-phase timing information.
void print_phases();

// Returns whether the next GC should be a mixed GC; the given strings
// describe the chosen action (presumably used for logging -- see the .cpp).
bool next_gc_should_be_mixed(const char* true_action_str,
                             const char* false_action_str) const;

// Calculate the number of initial and optional old gen regions from
// the given collection set candidates and the remaining time, returning
// them in num_initial_regions and num_optional_regions.
void calculate_old_collection_set_regions(G1CollectionSetCandidates* candidates,
                                          double time_remaining_ms,
                                          uint& num_initial_regions,
                                          uint& num_optional_regions);

// Calculate the number of optional regions from the given collection set candidates,
// the remaining time and the maximum number of these regions and return the number
// of actually selected regions in num_optional_regions.
void calculate_optional_collection_set_regions(G1CollectionSetCandidates* candidates,
                                               uint const max_optional_regions,
                                               double time_remaining_ms,
                                               uint& num_optional_regions);

private:
// Set the state to start a concurrent marking cycle and clear
// _initiate_conc_mark_if_possible because it has now been
// acted on.
void initiate_conc_mark();

public:
// This sets the initiate_conc_mark_if_possible() flag to start a
// new cycle, as long as we are not already in one. It's best if it
// is called during a safepoint when the test whether a cycle is in
// progress or not is stable.
bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);

// This is called at the very beginning of an evacuation pause (it
// has to be the first thing that the pause does). If
// initiate_conc_mark_if_possible() is true, and the concurrent
// marking thread has completed its work during the previous cycle,
// it will set in_initial_mark_gc() so that the pause does
// the initial-mark work and start a marking cycle.
void decide_on_conc_mark_initiation();

// Current target length of the young region list.
size_t young_list_target_length() const { return _young_list_target_length; }

bool should_allocate_mutator_region() const;

bool can_expand_young_list() const;

// Current maximum allowed length of the young region list.
uint young_list_max_length() const {
  return _young_list_max_length;
}

// Whether the young list length is sized adaptively.
bool use_adaptive_young_list_length() const;

// Transfer the given survivor regions to the collection set.
void transfer_survivors_to_cset(const G1SurvivorRegions* survivors);

private:
//
// Survivor regions policy.
//

399 // Current tenuring threshold, set to 0 if the collector reaches the
427 _survivor_surv_rate_group->start_adding_regions();
428 }
429
// Tell the survivor rate group that no more survivor regions will be added.
void note_stop_adding_survivor_regions() {
  _survivor_surv_rate_group->stop_adding_regions();
}
433
// Merge the given age table into the accumulated survivors age table.
void record_age_table(AgeTable* age_table) {
  _survivors_age_table.merge(age_table);
}
437
// Print the accumulated survivors age table.
void print_age_table();

// Recompute the allowed young list expansion (presumably for GC-locker
// induced extension -- see the .cpp).
void update_max_gc_locker_expansion();

// Update survivor-related policy parameters.
void update_survivors_policy();

// Whether a collection should be upgraded to a full GC. This default
// implementation never forces an upgrade; subclasses may override.
virtual bool force_upgrade_to_full() {
  return false;
}
447 };
448
449 #endif // SHARE_GC_G1_G1POLICY_HPP
|
// Number of regions held back as a reserve (exact sizing is computed
// elsewhere -- see the code that assigns this field).
uint _reserve_regions;

// Young generation sizing policy.
G1YoungGenSizer* _young_gen_sizer;

// Number of free regions observed at the end of the last collection.
uint _free_regions_at_end_of_collection;

// Most recently sampled remembered set length.
size_t _rs_length;

// Predicted remembered set length for the upcoming collection.
size_t _rs_length_prediction;

// Card statistics: pending cards at the start of the current GC and at the
// end of the previous one, plus totals refined by the mutator and by the
// concurrent refinement threads.
size_t _pending_cards_at_gc_start;
size_t _pending_cards_at_prev_gc_end;
size_t _total_mutator_refined_cards;
size_t _total_concurrent_refined_cards;
// Accumulated time spent in concurrent refinement.
Tickspan _total_concurrent_refinement_time;

// The amount of allocated bytes in old gen during the last mutator and the following
// young GC phase.
size_t _bytes_allocated_in_old_since_last_gc;

// Minimum desired bytes (presumably heap occupancy) determined after the
// last concurrent mark; see determine_desired_bytes_after_concurrent_mark().
size_t _minimum_desired_bytes_after_last_cm;

// Recompute _minimum_desired_bytes_after_last_cm.
void determine_desired_bytes_after_concurrent_mark();

// Tracks the time from initial mark to the following mixed GC.
G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;

119 bool should_update_surv_rate_group_predictors() {
120 return collector_state()->in_young_only_phase() && !collector_state()->mark_or_rebuild_in_progress();
121 }

// Time spent processing logged cards (units: presumably ms -- confirm
// against the implementation).
double logged_cards_processing_time() const;
public:
// Read-only access to the predictor instance.
const G1Predictions& predictor() const { return _predictor; }
126 const G1Analytics* analytics() const { return const_cast<const G1Analytics*>(_analytics); }

// Mutable access to the remembered set tracking policy.
G1RemSetTrackingPolicy* remset_tracker() { return &_remset_tracker; }

// Add the given number of bytes to the total number of allocated bytes in the old gen.
void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }

// Tag the given region as eden and install the eden survivor rate group
// on it.
void set_region_eden(HeapRegion* hr) {
  hr->set_eden();
  hr->install_surv_rate_group(_eden_surv_rate_group);
}
// Record the start and end of an evacuation pause.
void record_collection_pause_start(double start_time_sec);
virtual void record_collection_pause_end(double pause_time_ms);

// Record the start and end of a full collection.
void record_full_collection_start();
virtual void record_full_collection_end();

// Must currently be called while the world is stopped.
void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);

// Record start and end of remark.
void record_concurrent_mark_remark_start();
void record_concurrent_mark_remark_end();

// Record start, end, and completion of cleanup.
void record_concurrent_mark_cleanup_start();
void record_concurrent_mark_cleanup_end();

// Print per-phase timing information.
void print_phases();

// Returns whether the next GC should be a mixed GC; the given strings
// describe the chosen action (presumably used for logging -- see the .cpp).
bool next_gc_should_be_mixed(const char* true_action_str = NULL,
                             const char* false_action_str = NULL) const;

// Calculate the number of initial and optional old gen regions from
// the given collection set candidates and the remaining time, returning
// them in num_initial_regions and num_optional_regions.
void calculate_old_collection_set_regions(G1CollectionSetCandidates* candidates,
                                          double time_remaining_ms,
                                          uint& num_initial_regions,
                                          uint& num_optional_regions);

// Calculate the number of optional regions from the given collection set candidates,
// the remaining time and the maximum number of these regions and return the number
// of actually selected regions in num_optional_regions.
void calculate_optional_collection_set_regions(G1CollectionSetCandidates* candidates,
                                               uint const max_optional_regions,
                                               double time_remaining_ms,
                                               uint& num_optional_regions);

private:
// Set the state to start a concurrent marking cycle and clear
// _initiate_conc_mark_if_possible because it has now been
// acted on.
void initiate_conc_mark();

public:
// This sets the initiate_conc_mark_if_possible() flag to start a
// new cycle, as long as we are not already in one. It's best if it
// is called during a safepoint when the test whether a cycle is in
// progress or not is stable.
bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);

// This is called at the very beginning of an evacuation pause (it
// has to be the first thing that the pause does). If
// initiate_conc_mark_if_possible() is true, and the concurrent
// marking thread has completed its work during the previous cycle,
// it will set in_initial_mark_gc() so that the pause does
// the initial-mark work and start a marking cycle.
void decide_on_conc_mark_initiation();

// Cached minimum desired bytes computed after the last concurrent mark.
size_t desired_bytes_after_concurrent_mark() const { return _minimum_desired_bytes_after_last_cm; }

// Current target length of the young region list.
size_t young_list_target_length() const { return _young_list_target_length; }

bool should_allocate_mutator_region() const;

bool can_expand_young_list() const;

// Current maximum allowed length of the young region list.
uint young_list_max_length() const {
  return _young_list_max_length;
}

// Whether the young list length is sized adaptively.
bool use_adaptive_young_list_length() const;

// Transfer the given survivor regions to the collection set.
void transfer_survivors_to_cset(const G1SurvivorRegions* survivors);

private:
//
// Survivor regions policy.
//

405 // Current tenuring threshold, set to 0 if the collector reaches the
433 _survivor_surv_rate_group->start_adding_regions();
434 }
435
// Tell the survivor rate group that no more survivor regions will be added.
void note_stop_adding_survivor_regions() {
  _survivor_surv_rate_group->stop_adding_regions();
}
439
// Merge the given age table into the accumulated survivors age table.
void record_age_table(AgeTable* age_table) {
  _survivors_age_table.merge(age_table);
}
443
// Print the accumulated survivors age table.
void print_age_table();

// Recompute the allowed young list expansion (presumably for GC-locker
// induced extension -- see the .cpp).
void update_max_gc_locker_expansion();

// Update survivor-related policy parameters.
void update_survivors_policy();

// Whether a collection should be upgraded to a full GC. This default
// implementation never forces an upgrade; subclasses may override.
virtual bool force_upgrade_to_full() {
  return false;
}
// Compute the desired number of bytes for the given number of used bytes
// (helper behind _minimum_desired_bytes_after_last_cm -- see the .cpp).
size_t desired_bytes_after_concurrent_mark(size_t used_bytes);
454 };
455
456 #endif // SHARE_GC_G1_G1POLICY_HPP
|