/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCardTable.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/align.hpp"

inline const char* PSOldGen::select_name() {
  return UseParallelOldGC ? "ParOldGen" : "PSOldGen";
}

PSOldGen::PSOldGen(ReservedSpace rs, size_t alignment,
                   size_t initial_size, size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{
  initialize(rs, alignment, perf_data_name, level);
}

PSOldGen::PSOldGen(size_t initial_size,
                   size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{}

void PSOldGen::initialize(ReservedSpace rs, size_t alignment,
                          const char* perf_data_name, int level) {
  initialize_virtual_space(rs, alignment);
  initialize_work(perf_data_name, level);

  // The old gen can grow to gen_size_limit(). _reserved reflects only
  // the current maximum that can be committed.
  assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");

  initialize_performance_counters(perf_data_name, level);
}
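// Note on the ordering in initialize() above: the virtual space must exist
// before initialize_work() can lay the start array, card table and object
// space over it, and the performance counters are created last since they
// reference both the virtual space and the object space set up by the
// earlier steps.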
void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {

  if (AllocateOldGenAt != NULL) {
    _virtual_space = new PSFileBackedVirtualSpace(rs, alignment, AllocateOldGenAt);
  } else {
    _virtual_space = new PSVirtualSpace(rs, alignment);
  }
  if (!_virtual_space->expand_by(_init_gen_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}

void PSOldGen::initialize_work(const char* perf_data_name, int level) {
  //
  // Basic memory initialization
  //

  MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(),
                           heap_word_size(_max_gen_size));
  assert(limit_reserved.byte_size() == _max_gen_size,
         "word vs bytes confusion");
  //
  // Object start stuff
  //

  start_array()->initialize(limit_reserved);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  //
  // Card table stuff
  //

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately rather than
    // waiting for the initialization of the space even though
    // mangling is related to spaces. Doing it here eliminates
    // the need to carry along information that a complete mangling
    // (bottom to end) needs to be done.
    SpaceMangler::mangle_region(cmr);
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSCardTable* ct = heap->card_table();
  ct->resize_covered_region(cmr);

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
  if (_reserved.end() != heap->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
  }

  //
  // ObjectSpace stuff
  //

  _object_space = new MutableSpace(virtual_space()->alignment());

  if (_object_space == NULL)
    vm_exit_during_initialization("Could not allocate an old gen space");

  object_space()->initialize(cmr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle);

#if INCLUDE_SERIALGC
  _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);

  if (_object_mark_sweep == NULL) {
    vm_exit_during_initialization("Could not complete allocation of old generation");
  }
#endif // INCLUDE_SERIALGC

  // Update the start_array
  start_array()->set_covered_region(cmr);
}
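// A brief illustration of the card-alignment guarantee above (hypothetical
// numbers): with 512-byte cards, a generation boundary at an address that is
// not a multiple of 512 would leave one card straddling two generations, so
// committing, uncommitting, clearing or dirtying cards for one generation
// could clobber card state belonging to its neighbor.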
void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) {
  // Generation Counters, generation 'level', 1 subspace
  _gen_counters = new PSGenerationCounters(perf_data_name, level, 1, _min_gen_size,
                                           _max_gen_size, virtual_space());
  _space_counters = new SpaceCounters(perf_data_name, 0,
                                      virtual_space()->reserved_size(),
                                      _object_space, _gen_counters);
}

// Assume that the generation has been allocated if its
// reserved size is not 0.
bool PSOldGen::is_allocated() {
  return virtual_space()->reserved_size() != 0;
}

#if INCLUDE_SERIALGC

void PSOldGen::precompact() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Reset start array first.
  start_array()->reset();

  object_mark_sweep()->precompact();

  // Now compact the young gen
  heap->young_gen()->precompact();
}

void PSOldGen::adjust_pointers() {
  object_mark_sweep()->adjust_pointers();
}

void PSOldGen::compact() {
  object_mark_sweep()->compact(ZapUnusedHeapArea);
}

#endif // INCLUDE_SERIALGC

size_t PSOldGen::contiguous_available() const {
  return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
}
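// Note on contiguous_available() above: it counts both the free space already
// committed in the object space and the still-uncommitted remainder of the
// reserved virtual space, since the old gen can expand into the latter on
// demand.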
// Allocation. We report all successful allocations to the size policy.
// Note that the perm gen does not use this method, and should not!
HeapWord* PSOldGen::allocate(size_t word_size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = allocate_noexpand(word_size);

  if (res == NULL) {
    res = expand_and_allocate(word_size);
  }

  // Allocations in the old generation need to be reported
  if (res != NULL) {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    heap->size_policy()->tenured_allocation(word_size * HeapWordSize);
  }

  return res;
}

HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return allocate_noexpand(word_size);
}

HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return cas_allocate_noexpand(word_size);
}

void PSOldGen::expand(size_t bytes) {
  if (bytes == 0) {
    return;
  }
  MutexLocker x(ExpandHeap_lock);
  const size_t alignment = virtual_space()->alignment();
  size_t aligned_bytes = align_up(bytes, alignment);
  size_t aligned_expand_bytes = align_up(MinHeapDeltaBytes, alignment);

  if (UseNUMA) {
    // With NUMA we use round-robin page allocation for the old gen. Expand by at least
    // providing a page per lgroup. Alignment is larger or equal to the page size.
    aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
  }
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap. An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not. A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee. Align down to give a best effort. This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = align_down(bytes, alignment);
  }

  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = expand_by(aligned_expand_bytes);
  }
  if (!success) {
    success = expand_by(aligned_bytes);
  }
  if (!success) {
    success = expand_to_reserved();
  }

  if (success && GCLocker::is_active_and_needs_gc()) {
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
  }
}
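// A worked example of the sizing logic in expand() above (hypothetical
// numbers): with alignment = 64K, a request of bytes = 100K gives
// aligned_bytes = 128K. If MinHeapDeltaBytes rounds up to
// aligned_expand_bytes = 192K, then 192K > 128K and we first try
// expand_by(192K); if that fails we fall back to expand_by(128K), and as a
// last resort to expand_to_reserved().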
bool PSOldGen::expand_by(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what virtual_space()->expand_by(0) would return
  }
  bool result = virtual_space()->expand_by(bytes);
  if (result) {
    if (ZapUnusedHeapArea) {
      // We need to mangle the newly expanded area. The memregion spans
      // end -> new_end, we assume that top -> end is already mangled.
      // Do the mangling before post_resize() is called because
      // the space is available for allocation after post_resize().
      HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
      assert(object_space()->end() < virtual_space_high,
             "Should be true before post_resize()");
      MemRegion mangle_region(object_space()->end(), virtual_space_high);
      // Note that the object space has not yet been updated to
      // coincide with the new underlying virtual space.
      SpaceMangler::mangle_region(mangle_region);
    }
    post_resize();
    if (UsePerfData) {
      _space_counters->update_capacity();
      _gen_counters->update_all();
    }
  }

  if (result) {
    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    log_debug(gc)("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }

  return result;
}

bool PSOldGen::expand_to_reserved() {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  bool result = true;
  const size_t remaining_bytes = virtual_space()->uncommitted_size();
  if (remaining_bytes > 0) {
    result = expand_by(remaining_bytes);
    DEBUG_ONLY(if (!result) log_warning(gc)("grow to reserve failed"));
  }
  return result;
}

void PSOldGen::shrink(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  // Shrink by the aligned size, not the raw request, so the committed
  // boundary stays aligned.
  size_t size = align_down(bytes, virtual_space()->alignment());
  if (size > 0) {
    virtual_space()->shrink_by(size);
    post_resize();

    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size + size;
    log_debug(gc)("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, size/K, new_mem_size/K);
  }
}

void PSOldGen::resize(size_t desired_free_space) {
  const size_t alignment = virtual_space()->alignment();
  const size_t size_before = virtual_space()->committed_size();
  size_t new_size = used_in_bytes() + desired_free_space;
  if (new_size < used_in_bytes()) {
    // Overflowed the addition.
    new_size = gen_size_limit();
  }
  // Adjust according to our min and max
  new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size());

  assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?");
  new_size = align_up(new_size, alignment);

  const size_t current_size = capacity_in_bytes();

  log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: "
    "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
    " new size: " SIZE_FORMAT " current size " SIZE_FORMAT
    " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
    desired_free_space, used_in_bytes(), new_size, current_size,
    gen_size_limit(), min_gen_size());

  if (new_size == current_size) {
    // No change requested
    return;
  }
  if (new_size > current_size) {
    size_t change_bytes = new_size - current_size;
    expand(change_bytes);
  } else {
    size_t change_bytes = current_size - new_size;
    // shrink doesn't grab this lock, expand does. Is that right?
    MutexLocker x(ExpandHeap_lock);
    shrink(change_bytes);
  }

  log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: collection: %d (" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                      ParallelScavengeHeap::heap()->total_collections(),
                      size_before,
                      virtual_space()->committed_size());
}
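// A worked example of resize() above (hypothetical numbers): with
// used_in_bytes() = 60M and desired_free_space = 20M, new_size starts at 80M,
// is clamped to [min_gen_size(), gen_size_limit()], and is aligned up to the
// virtual space alignment. If the current committed capacity is 64M we expand
// by 16M; if it were 96M we would instead take ExpandHeap_lock and shrink by
// 16M.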
// NOTE! We need to be careful about resizing. During a GC, multiple
// allocators may be active during heap expansion. If we allow the
// heap resizing to become visible before we have correctly resized
// all heap related data structures, we may cause program failures.
void PSOldGen::post_resize() {
  // First construct a memregion representing the new size
  MemRegion new_memregion((HeapWord*)virtual_space()->low(),
                          (HeapWord*)virtual_space()->high());
  size_t new_word_size = new_memregion.word_size();

  start_array()->set_covered_region(new_memregion);
  ParallelScavengeHeap::heap()->card_table()->resize_covered_region(new_memregion);

  // ALWAYS do this last!!
  object_space()->initialize(new_memregion,
                             SpaceDecorator::DontClear,
                             SpaceDecorator::DontMangle);

  assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
         "Sanity");
}

size_t PSOldGen::gen_size_limit() {
  return _max_gen_size;
}

void PSOldGen::reset_after_change() {
  ShouldNotReachHere();
  return;
}

size_t PSOldGen::available_for_expansion() {
  ShouldNotReachHere();
  return 0;
}

size_t PSOldGen::available_for_contraction() {
  ShouldNotReachHere();
  return 0;
}

void PSOldGen::print() const { print_on(tty); }
void PSOldGen::print_on(outputStream* st) const {
  st->print(" %-15s", name());
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
            capacity_in_bytes()/K, used_in_bytes()/K);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               p2i(virtual_space()->low_boundary()),
               p2i(virtual_space()->high()),
               p2i(virtual_space()->high_boundary()));

  st->print(" object"); object_space()->print_on(st);
}

void PSOldGen::print_used_change(size_t prev_used) const {
  log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
                     name(), prev_used / K, used_in_bytes() / K, capacity_in_bytes() / K);
}

void PSOldGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

#ifndef PRODUCT

void PSOldGen::space_invariants() {
  assert(object_space()->end() == (HeapWord*) virtual_space()->high(),
         "Space invariant");
  assert(object_space()->bottom() == (HeapWord*) virtual_space()->low(),
         "Space invariant");
  assert(virtual_space()->low_boundary() <= virtual_space()->low(),
         "Space invariant");
  assert(virtual_space()->high_boundary() >= virtual_space()->high(),
         "Space invariant");
  assert(virtual_space()->low_boundary() == (char*) _reserved.start(),
         "Space invariant");
  assert(virtual_space()->high_boundary() == (char*) _reserved.end(),
         "Space invariant");
  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
         "Space invariant");
}
#endif

void PSOldGen::verify() {
  object_space()->verify();
}

class VerifyObjectStartArrayClosure : public ObjectClosure {
  PSOldGen* _old_gen;
  ObjectStartArray* _start_array;

 public:
  VerifyObjectStartArrayClosure(PSOldGen* old_gen, ObjectStartArray* start_array) :
    _old_gen(old_gen), _start_array(start_array) { }

  virtual void do_object(oop obj) {
    HeapWord* test_addr = (HeapWord*)obj + 1;
    guarantee(_start_array->object_start(test_addr) == (HeapWord*)obj, "ObjectStartArray cannot find start of object");
    guarantee(_start_array->is_block_allocated((HeapWord*)obj), "ObjectStartArray missing block allocation");
  }
};

void PSOldGen::verify_object_start_array() {
  VerifyObjectStartArrayClosure check(this, &_start_array);
  object_iterate(&check);
}
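// Note on the closure above: test_addr probes one word past the start of each
// object, so object_start() must map an interior address back to the object's
// start; is_block_allocated() additionally checks that the start array
// recorded an entry for the block containing the object's first word.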
#ifndef PRODUCT
void PSOldGen::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  object_space()->set_top_for_allocations();
}
#endif