/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCardTable.hpp"
#include "gc/parallel/psFileBackedVirtualspace.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/spaceDecorator.inline.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/align.hpp"

PSOldGen::PSOldGen(ReservedSpace rs, size_t initial_size, size_t min_size,
                   size_t max_size, const char* perf_data_name, int level):
  _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{
  initialize(rs, GenAlignment, perf_data_name, level);
}
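
// Illustrative sketch (not part of this file): the heap is expected to set
// this generation up with something of the shape
//
//   PSOldGen* old = new PSOldGen(old_rs, OldSize, MinOldSize, MaxOldSize,
//                                "old", 1 /* level */);
//
// after which initialize() below has committed _init_gen_size bytes and the
// object space, card table coverage and counters are ready for use. The
// exact call site and arguments here are assumptions, not a quote of the
// caller.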

void PSOldGen::initialize(ReservedSpace rs, size_t alignment,
                          const char* perf_data_name, int level) {
  initialize_virtual_space(rs, alignment);
  initialize_work(perf_data_name, level);

  // The old gen can grow to max_gen_size(). _reserved reflects only
  // the current maximum that can be committed.
  assert(_reserved.byte_size() <= max_gen_size(), "Consistency check");

  initialize_performance_counters(perf_data_name, level);
}

void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {

  if (ParallelArguments::is_heterogeneous_heap()) {
    _virtual_space = new PSFileBackedVirtualSpace(rs, alignment, AllocateOldGenAt);
    if (!(static_cast<PSFileBackedVirtualSpace*>(_virtual_space))->initialize()) {
      vm_exit_during_initialization("Could not map space for PSOldGen at given AllocateOldGenAt path");
    }
  } else {
    _virtual_space = new PSVirtualSpace(rs, alignment);
  }
  if (!_virtual_space->expand_by(_init_gen_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}

void PSOldGen::initialize_work(const char* perf_data_name, int level) {
  //
  // Basic memory initialization
  //

  MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(),
                           heap_word_size(_max_gen_size));
  assert(limit_reserved.byte_size() == _max_gen_size,
         "word vs bytes confusion");
  //
  // Object start stuff
  //

  start_array()->initialize(limit_reserved);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  //
  // Card table stuff
  //

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately rather than
    // waiting for the initialization of the space even though
    // mangling is related to spaces. Doing it here eliminates
    // the need to carry along information that a complete mangling
    // (bottom to end) needs to be done.
    SpaceMangler::mangle_region(cmr);
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSCardTable* ct = heap->card_table();
  ct->resize_covered_region(cmr);

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
  if (_reserved.end() != heap->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
  }

  //
  // ObjectSpace stuff
  //

  _object_space = new MutableSpace(virtual_space()->alignment());
  object_space()->initialize(cmr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle);

  // Update the start_array
  start_array()->set_covered_region(cmr);
}

void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) {
  // Generation Counters, generation 'level', 1 subspace
  _gen_counters = new PSGenerationCounters(perf_data_name, level, 1, _min_gen_size,
                                           _max_gen_size, virtual_space());
  _space_counters = new SpaceCounters(perf_data_name, 0,
                                      virtual_space()->reserved_size(),
                                      _object_space, _gen_counters);
}

// Assume that the generation has been allocated if its
// reserved size is not 0.
bool PSOldGen::is_allocated() {
  return virtual_space()->reserved_size() != 0;
}
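
// Illustrative usage sketch (not in the original source; the accessor names
// are assumptions): the allocation entry points below share an
// expand-on-failure shape. A hypothetical caller holding the Heap_lock
// would do roughly
//
//   PSOldGen* old_gen = ParallelScavengeHeap::heap()->old_gen();
//   HeapWord* p = old_gen->allocate(word_count);  // fast path, then expand
//   if (p == NULL) { /* old gen full even after expanding to the reserve */ }
//
// allocate() runs under the lock or at a safepoint, while the cas_allocate
// variant instead bumps the space's top pointer with a compare-and-swap.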
// Allocation. We report all successful allocations to the size policy.
// Note that the perm gen does not use this method, and should not!
HeapWord* PSOldGen::allocate(size_t word_size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = allocate_noexpand(word_size);

  if (res == NULL) {
    res = expand_and_allocate(word_size);
  }

  // Allocations in the old generation need to be reported
  if (res != NULL) {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    heap->size_policy()->tenured_allocation(word_size * HeapWordSize);
  }

  return res;
}

HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::naked_sleep(GCExpandToAllocateDelayMillis);
  }
  return allocate_noexpand(word_size);
}

HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::naked_sleep(GCExpandToAllocateDelayMillis);
  }
  return cas_allocate_noexpand(word_size);
}

void PSOldGen::expand(size_t bytes) {
  if (bytes == 0) {
    return;
  }
  MutexLocker x(ExpandHeap_lock);
  const size_t alignment = virtual_space()->alignment();
  size_t aligned_bytes = align_up(bytes, alignment);
  size_t aligned_expand_bytes = align_up(MinHeapDeltaBytes, alignment);

  if (UseNUMA) {
    // With NUMA we use round-robin page allocation for the old gen. Expand by at least
    // providing a page per lgroup. Alignment is larger or equal to the page size.
    aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
  }
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap. An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not. A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee. Align down to give a best effort. This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = align_down(bytes, alignment);
  }

  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = expand_by(aligned_expand_bytes);
  }
  if (!success) {
    success = expand_by(aligned_bytes);
  }
  if (!success) {
    success = expand_to_reserved();
  }

  if (success && GCLocker::is_active_and_needs_gc()) {
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
  }
}
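
// Worked example (illustrative only) of the alignment logic in expand()
// above, assuming a 64K alignment:
//
//   bytes = 1            -> aligned_bytes = 64K (align_up rounds up)
//   bytes = SIZE_MAX - 1 -> align_up wraps to 0, so the align_down fallback
//                           yields the largest aligned amount <= bytes
//
// expand() then tries the MinHeapDeltaBytes-based amount first when it is
// larger than the request, falls back to the request itself, and finally to
// whatever uncommitted reservation remains.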
bool PSOldGen::expand_by(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what virtual_space()->expand_by(0) would return
  }
  bool result = virtual_space()->expand_by(bytes);
  if (result) {
    if (ZapUnusedHeapArea) {
      // We need to mangle the newly expanded area. The memregion spans
      // end -> new_end, we assume that top -> end is already mangled.
      // Do the mangling before post_resize() is called because
      // the space is available for allocation after post_resize().
      HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
      assert(object_space()->end() < virtual_space_high,
             "Should be true before post_resize()");
      MemRegion mangle_region(object_space()->end(), virtual_space_high);
      // Note that the object space has not yet been updated to
      // coincide with the new underlying virtual space.
      SpaceMangler::mangle_region(mangle_region);
    }
    post_resize();
    if (UsePerfData) {
      _space_counters->update_capacity();
      _gen_counters->update_all();
    }
  }

  if (result) {
    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    log_debug(gc)("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }

  return result;
}

bool PSOldGen::expand_to_reserved() {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  bool result = true;
  const size_t remaining_bytes = virtual_space()->uncommitted_size();
  if (remaining_bytes > 0) {
    result = expand_by(remaining_bytes);
    DEBUG_ONLY(if (!result) log_warning(gc)("grow to reserve failed"));
  }
  return result;
}

void PSOldGen::shrink(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  // Shrink by whole alignment units only; shrink_by() expects an aligned
  // byte count, so use the aligned size rather than the raw "bytes".
  size_t size = align_down(bytes, virtual_space()->alignment());
  if (size > 0) {
    virtual_space()->shrink_by(size);
    post_resize();

    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size + size;
    log_debug(gc)("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, size/K, new_mem_size/K);
  }
}
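
// Worked example (illustrative only) for resize() below: with
// used_in_bytes() = 300M, desired_free_space = 100M and a 64K alignment,
// new_size = 400M, clamped to [min_gen_size(), max_gen_size()] and then
// aligned up. A current capacity of 350M would therefore expand by 50M,
// while 450M would shrink by 50M (taking the ExpandHeap_lock first, since
// shrink() only asserts that the lock is held).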
void PSOldGen::resize(size_t desired_free_space) {
  const size_t alignment = virtual_space()->alignment();
  const size_t size_before = virtual_space()->committed_size();
  size_t new_size = used_in_bytes() + desired_free_space;
  if (new_size < used_in_bytes()) {
    // Overflowed the addition.
    new_size = max_gen_size();
  }
  // Adjust according to our min and max
  new_size = clamp(new_size, min_gen_size(), max_gen_size());

  assert(max_gen_size() >= reserved().byte_size(), "max new size problem?");
  new_size = align_up(new_size, alignment);

  const size_t current_size = capacity_in_bytes();

  log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: "
    "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
    " new size: " SIZE_FORMAT " current size " SIZE_FORMAT
    " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
    desired_free_space, used_in_bytes(), new_size, current_size,
    max_gen_size(), min_gen_size());

  if (new_size == current_size) {
    // No change requested
    return;
  }
  if (new_size > current_size) {
    size_t change_bytes = new_size - current_size;
    expand(change_bytes);
  } else {
    size_t change_bytes = current_size - new_size;
    // shrink doesn't grab this lock, expand does. Is that right?
    MutexLocker x(ExpandHeap_lock);
    shrink(change_bytes);
  }

  log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: collection: %d (" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                      ParallelScavengeHeap::heap()->total_collections(),
                      size_before,
                      virtual_space()->committed_size());
}

// NOTE! We need to be careful about resizing. During a GC, multiple
// allocators may be active during heap expansion. If we allow the
// heap resizing to become visible before we have correctly resized
// all heap related data structures, we may cause program failures.
void PSOldGen::post_resize() {
  // First construct a memregion representing the new size
  MemRegion new_memregion((HeapWord*)virtual_space()->low(),
                          (HeapWord*)virtual_space()->high());
  size_t new_word_size = new_memregion.word_size();

  start_array()->set_covered_region(new_memregion);
  ParallelScavengeHeap::heap()->card_table()->resize_covered_region(new_memregion);

  // ALWAYS do this last!!
  object_space()->initialize(new_memregion,
                             SpaceDecorator::DontClear,
                             SpaceDecorator::DontMangle);

  assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
         "Sanity");
}

void PSOldGen::print() const { print_on(tty); }

void PSOldGen::print_on(outputStream* st) const {
  st->print(" %-15s", name());
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
            capacity_in_bytes()/K, used_in_bytes()/K);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               p2i(virtual_space()->low_boundary()),
               p2i(virtual_space()->high()),
               p2i(virtual_space()->high_boundary()));

  st->print(" object"); object_space()->print_on(st);
}

void PSOldGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

void PSOldGen::verify() {
  object_space()->verify();
}

class VerifyObjectStartArrayClosure : public ObjectClosure {
  PSOldGen* _old_gen;
  ObjectStartArray* _start_array;

 public:
  VerifyObjectStartArrayClosure(PSOldGen* old_gen, ObjectStartArray* start_array) :
    _old_gen(old_gen), _start_array(start_array) { }

  virtual void do_object(oop obj) {
    HeapWord* test_addr = cast_from_oop<HeapWord*>(obj) + 1;
    guarantee(_start_array->object_start(test_addr) == cast_from_oop<HeapWord*>(obj),
              "ObjectStartArray cannot find start of object");
    guarantee(_start_array->is_block_allocated(cast_from_oop<HeapWord*>(obj)),
              "ObjectStartArray missing block allocation");
  }
};

void PSOldGen::verify_object_start_array() {
  VerifyObjectStartArrayClosure check(this, &_start_array);
  object_iterate(&check);
}

#ifndef PRODUCT
void PSOldGen::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  object_space()->set_top_for_allocations();
}
#endif
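
// Illustrative note (not in the original source): verify_object_start_array()
// above can be exercised from a debug build roughly as
//
//   old_gen->verify_object_start_array();  // walks every object and checks
//                                          // that the ObjectStartArray
//                                          // round-trips each address
//
// For each object, an interior address (start + 1 word) must map back to the
// object's start, which is what the closure's object_start() and
// is_block_allocated() guarantees check.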