150 // If no additional changes are required, this can be deleted
151 // and the changes factored back into PSYoungGen::resize_generation().
152 bool ASPSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
153 const size_t alignment = virtual_space()->alignment();
154 size_t orig_size = virtual_space()->committed_size();
155 bool size_changed = false;
156
157 // There used to be a guarantee here that
158 // (eden_size + 2*survivor_size) <= _max_gen_size
159 // This requirement is enforced by the calculation of desired_size
160 // below. It may not be true on entry since the size of the
161 // eden_size is not bounded by the generation size.
162
163 assert(max_size() == reserved().byte_size(), "max gen size problem?");
164 assert(min_gen_size() <= orig_size && orig_size <= max_size(),
165 "just checking");
166
167 // Adjust new generation size
168 const size_t eden_plus_survivors =
169 align_up(eden_size + 2 * survivor_size, alignment);
170 size_t desired_size = MAX2(MIN2(eden_plus_survivors, gen_size_limit()),
171 min_gen_size());
172 assert(desired_size <= gen_size_limit(), "just checking");
173
174 if (desired_size > orig_size) {
175 // Grow the generation
176 size_t change = desired_size - orig_size;
177 HeapWord* prev_low = (HeapWord*) virtual_space()->low();
178 if (!virtual_space()->expand_by(change)) {
179 return false;
180 }
181 if (ZapUnusedHeapArea) {
182 // Mangle newly committed space immediately because it
184 // can be done here more simply than after the new
184 // spaces have been computed.
185 HeapWord* new_low = (HeapWord*) virtual_space()->low();
186 assert(new_low < prev_low, "Did not grow");
187
188 MemRegion mangle_region(new_low, prev_low);
189 SpaceMangler::mangle_region(mangle_region);
190 }
191 size_changed = true;
|
150 // If no additional changes are required, this can be deleted
151 // and the changes factored back into PSYoungGen::resize_generation().
152 bool ASPSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
153 const size_t alignment = virtual_space()->alignment();
154 size_t orig_size = virtual_space()->committed_size();
155 bool size_changed = false;
156
157 // There used to be a guarantee here that
158 // (eden_size + 2*survivor_size) <= _max_gen_size
159 // This requirement is enforced by the calculation of desired_size
160 // below. It may not be true on entry since the size of the
161 // eden_size is not bounded by the generation size.
162
163 assert(max_size() == reserved().byte_size(), "max gen size problem?");
164 assert(min_gen_size() <= orig_size && orig_size <= max_size(),
165 "just checking");
166
167 // Adjust new generation size
168 const size_t eden_plus_survivors =
169 align_up(eden_size + 2 * survivor_size, alignment);
170 size_t desired_size = clamp(eden_plus_survivors, min_gen_size(), gen_size_limit());
171 assert(desired_size <= gen_size_limit(), "just checking");
172
173 if (desired_size > orig_size) {
174 // Grow the generation
175 size_t change = desired_size - orig_size;
176 HeapWord* prev_low = (HeapWord*) virtual_space()->low();
177 if (!virtual_space()->expand_by(change)) {
178 return false;
179 }
180 if (ZapUnusedHeapArea) {
181 // Mangle newly committed space immediately because it
183 // can be done here more simply than after the new
183 // spaces have been computed.
184 HeapWord* new_low = (HeapWord*) virtual_space()->low();
185 assert(new_low < prev_low, "Did not grow");
186
187 MemRegion mangle_region(new_low, prev_low);
188 SpaceMangler::mangle_region(mangle_region);
189 }
190 size_changed = true;
|