86 // next one. The given region should be the current candidate region
87 // in the CSet chooser.
88 void remove_and_move_to_next(HeapRegion* hr) {
89 assert(hr != NULL, "pre-condition");
90 assert(_curr_index < _length, "pre-condition");
91 assert(regions_at(_curr_index) == hr, "pre-condition");
92 regions_at_put(_curr_index, NULL);
93 assert(hr->reclaimable_bytes() <= _remaining_reclaimable_bytes,
94 err_msg("remaining reclaimable bytes inconsistent "
95 "from region: "SIZE_FORMAT" remaining: "SIZE_FORMAT,
96 hr->reclaimable_bytes(), _remaining_reclaimable_bytes));
97 _remaining_reclaimable_bytes -= hr->reclaimable_bytes();
98 _curr_index += 1;
99 }
100
// Constructor; implementation lives in the .cpp file.
CollectionSetChooser();

// Sorts the candidate region array (ordering criterion is defined in
// the implementation file -- presumably by reclaimability; confirm there).
void sort_regions();
104
105 // Determine whether to add the given region to the CSet chooser or
106 // not. Currently, we skip humongous regions (we never add them to
107 // the CSet, we only reclaim them during cleanup) and archive regions,
108 // which are both "pinned", and regions whose live bytes are over the
109 // threshold.
110 bool should_add(HeapRegion* hr) {
111 assert(hr->is_marked(), "pre-condition");
112 assert(!hr->is_young(), "should never consider young regions");
113 return !hr->is_pinned() &&
114 hr->live_bytes() < _region_live_threshold_bytes;
115 }
116
// Returns the number of candidate old regions added so far.
uint length() { return _length; }
119
// Serial (single-threaded) version of candidate-region addition.
void add_region(HeapRegion *hr);

// Must be called before calls to claim_array_chunk().
// n_regions is the number of regions, chunk_size the chunk size.
void prepare_for_par_region_addition(uint n_threads, uint n_regions, uint chunk_size);

// Returns the first index in a contiguous chunk of chunk_size indexes
// that the calling thread has reserved. These must be set by the
// calling thread using set_region() (to NULL if necessary).
uint claim_array_chunk(uint chunk_size);
|
86 // next one. The given region should be the current candidate region
87 // in the CSet chooser.
88 void remove_and_move_to_next(HeapRegion* hr) {
89 assert(hr != NULL, "pre-condition");
90 assert(_curr_index < _length, "pre-condition");
91 assert(regions_at(_curr_index) == hr, "pre-condition");
92 regions_at_put(_curr_index, NULL);
93 assert(hr->reclaimable_bytes() <= _remaining_reclaimable_bytes,
94 err_msg("remaining reclaimable bytes inconsistent "
95 "from region: "SIZE_FORMAT" remaining: "SIZE_FORMAT,
96 hr->reclaimable_bytes(), _remaining_reclaimable_bytes));
97 _remaining_reclaimable_bytes -= hr->reclaimable_bytes();
98 _curr_index += 1;
99 }
100
// Constructor; implementation lives in the .cpp file.
CollectionSetChooser();

// Sorts the candidate region array (ordering criterion is defined in
// the implementation file -- presumably by reclaimability; confirm there).
void sort_regions();
104
105 // Determine whether to add the given region to the CSet chooser or
106 // not. Currently, we skip pinned regions and regions whose live
107 // bytes are over the threshold. Humongous regions may be reclaimed during cleanup.
108 bool should_add(HeapRegion* hr) {
109 assert(hr->is_marked(), "pre-condition");
110 assert(!hr->is_young(), "should never consider young regions");
111 return !hr->is_pinned() &&
112 hr->live_bytes() < _region_live_threshold_bytes;
113 }
114
// Returns the number of candidate old regions added so far.
uint length() { return _length; }
117
// Serial (single-threaded) version of candidate-region addition.
void add_region(HeapRegion *hr);

// Must be called before calls to claim_array_chunk().
// n_regions is the number of regions, chunk_size the chunk size.
void prepare_for_par_region_addition(uint n_threads, uint n_regions, uint chunk_size);

// Returns the first index in a contiguous chunk of chunk_size indexes
// that the calling thread has reserved. These must be set by the
// calling thread using set_region() (to NULL if necessary).
uint claim_array_chunk(uint chunk_size);
|