/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"

inline const char* PSOldGen::select_name() {
  return UseParallelOldGC ? "ParOldGen" : "PSOldGen";
}

PSOldGen::PSOldGen(ReservedSpace rs, size_t alignment,
                   size_t initial_size, size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{
  initialize(rs, alignment, perf_data_name, level);
}

PSOldGen::PSOldGen(size_t initial_size,
                   size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{}

void PSOldGen::initialize(ReservedSpace rs, size_t alignment,
                          const char* perf_data_name, int level) {
  initialize_virtual_space(rs, alignment);
  initialize_work(perf_data_name, level);

  // The old gen can grow to gen_size_limit().  _reserved reflects only
  // the current maximum that can be committed.
  assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");

  initialize_performance_counters(perf_data_name, level);
}

void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {

  _virtual_space = new PSVirtualSpace(rs, alignment);
  if (!_virtual_space->expand_by(_init_gen_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}

void PSOldGen::initialize_work(const char* perf_data_name, int level) {
  //
  // Basic memory initialization
  //

  MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(),
                           heap_word_size(_max_gen_size));
  assert(limit_reserved.byte_size() == _max_gen_size,
         "word vs bytes confusion");
  //
  // Object start stuff
  //

  start_array()->initialize(limit_reserved);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  //
  // Card table stuff
  //

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately rather than
    // waiting for the initialization of the space even though
    // mangling is related to spaces.  Doing it here eliminates
    // the need to carry along information that a complete mangling
    // (bottom to end) needs to be done.
    SpaceMangler::mangle_region(cmr);
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  BarrierSet* bs = heap->barrier_set();

  bs->resize_covered_region(cmr);

  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);

  // Verify that the start and end of this generation are each the start of a
  // card.  If this wasn't true, a single card could span more than one
  // generation, which would cause problems when we commit/uncommit memory,
  // and when we clear and dirty cards.
  guarantee(ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
  if (_reserved.end() != heap->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
  }

  //
  // ObjectSpace stuff
  //

  _object_space = new MutableSpace(virtual_space()->alignment());

  if (_object_space == NULL)
    vm_exit_during_initialization("Could not allocate an old gen space");

  object_space()->initialize(cmr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle);

  _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);

  if (_object_mark_sweep == NULL)
    vm_exit_during_initialization("Could not complete allocation of old generation");

  // Update the start_array
  start_array()->set_covered_region(cmr);
}

void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) {
  // Generation Counters, generation 'level', 1 subspace
  _gen_counters = new PSGenerationCounters(perf_data_name, level, 1, _min_gen_size,
                                           _max_gen_size, virtual_space());
  _space_counters = new SpaceCounters(perf_data_name, 0,
                                      virtual_space()->reserved_size(),
                                      _object_space, _gen_counters);
}

// Assume that the generation has been allocated if its
// reserved size is not 0.
bool PSOldGen::is_allocated() {
  return virtual_space()->reserved_size() != 0;
}

void PSOldGen::precompact() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Reset start array first.
  start_array()->reset();

  object_mark_sweep()->precompact();

  // Now compact the young gen
  heap->young_gen()->precompact();
}

void PSOldGen::adjust_pointers() {
  object_mark_sweep()->adjust_pointers();
}

void PSOldGen::compact() {
  object_mark_sweep()->compact(ZapUnusedHeapArea);
}

size_t PSOldGen::contiguous_available() const {
  return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
}

// Allocation.  We report all successful allocations to the size policy.
// Note that the perm gen does not use this method, and should not!
HeapWord* PSOldGen::allocate(size_t word_size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = allocate_noexpand(word_size);

  if (res == NULL) {
    res = expand_and_allocate(word_size);
  }

  // Allocations in the old generation need to be reported
  if (res != NULL) {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    heap->size_policy()->tenured_allocation(word_size * HeapWordSize);
  }

  return res;
}

HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return allocate_noexpand(word_size);
}

HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return cas_allocate_noexpand(word_size);
}

void PSOldGen::expand(size_t bytes) {
  if (bytes == 0) {
    return;
  }
  MutexLocker x(ExpandHeap_lock);
  const size_t alignment = virtual_space()->alignment();
  size_t aligned_bytes = align_size_up(bytes, alignment);
  size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment);

  if (UseNUMA) {
    // With NUMA we use round-robin page allocation for the old gen. Expand by at least
    // providing a page per lgroup. Alignment is larger or equal to the page size.
    aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
  }
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not.  A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee.  Align down to give a best effort.  This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = align_size_down(bytes, alignment);
  }

  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = expand_by(aligned_expand_bytes);
  }
  if (!success) {
    success = expand_by(aligned_bytes);
  }
  if (!success) {
    success = expand_to_reserved();
  }

  if (success && GC_locker::is_active_and_needs_gc()) {
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
  }
}

bool PSOldGen::expand_by(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what virtual_space()->expand_by(0) would return
  }
  bool result = virtual_space()->expand_by(bytes);
  if (result) {
    if (ZapUnusedHeapArea) {
      // We need to mangle the newly expanded area.  The memregion spans
      // end -> new_end; we assume that top -> end is already mangled.
      // Do the mangling before post_resize() is called because
      // the space is available for allocation after post_resize().
      HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
      assert(object_space()->end() < virtual_space_high,
             "Should be true before post_resize()");
      MemRegion mangle_region(object_space()->end(), virtual_space_high);
      // Note that the object space has not yet been updated to
      // coincide with the new underlying virtual space.
      SpaceMangler::mangle_region(mangle_region);
    }
    post_resize();
    if (UsePerfData) {
      _space_counters->update_capacity();
      _gen_counters->update_all();
    }
  }

  if (result) {
    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    log_debug(gc)("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }

  return result;
}

bool PSOldGen::expand_to_reserved() {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  bool result = true;
  const size_t remaining_bytes = virtual_space()->uncommitted_size();
  if (remaining_bytes > 0) {
    result = expand_by(remaining_bytes);
    DEBUG_ONLY(if (!result) warning("grow to reserve failed"));
  }
  return result;
}

void PSOldGen::shrink(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  // Only uncommit whole alignment units: align the request down and
  // shrink by the aligned size, not the raw byte count.
  size_t size = align_size_down(bytes, virtual_space()->alignment());
  if (size > 0) {
    virtual_space()->shrink_by(size);
    post_resize();

    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size + size;
    log_debug(gc)("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, size/K, new_mem_size/K);
  }
}

void PSOldGen::resize(size_t desired_free_space) {
  const size_t alignment = virtual_space()->alignment();
  const size_t size_before = virtual_space()->committed_size();
  size_t new_size = used_in_bytes() + desired_free_space;
  if (new_size < used_in_bytes()) {
    // Overflowed the addition.
    new_size = gen_size_limit();
  }
  // Adjust according to our min and max
  new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size());

  assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?");
  new_size = align_size_up(new_size, alignment);

  const size_t current_size = capacity_in_bytes();

  log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: "
    "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
    " new size: " SIZE_FORMAT " current size " SIZE_FORMAT
    " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
    desired_free_space, used_in_bytes(), new_size, current_size,
    gen_size_limit(), min_gen_size());

  if (new_size == current_size) {
    // No change requested
    return;
  }
  if (new_size > current_size) {
    size_t change_bytes = new_size - current_size;
    expand(change_bytes);
  } else {
    size_t change_bytes = current_size - new_size;
    // shrink() only asserts that ExpandHeap_lock is held, while expand()
    // acquires it itself, so take the lock here before shrinking.
    MutexLocker x(ExpandHeap_lock);
    shrink(change_bytes);
  }

  log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: collection: %d (" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                      ParallelScavengeHeap::heap()->total_collections(),
                      size_before,
                      virtual_space()->committed_size());
}

// NOTE! We need to be careful about resizing.  During a GC, multiple
// allocators may be active during heap expansion.  If we allow the
// heap resizing to become visible before we have correctly resized
// all heap related data structures, we may cause program failures.
void PSOldGen::post_resize() {
  // First construct a memregion representing the new size
  MemRegion new_memregion((HeapWord*)virtual_space()->low(),
                          (HeapWord*)virtual_space()->high());
  size_t new_word_size = new_memregion.word_size();

  start_array()->set_covered_region(new_memregion);
  ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(new_memregion);

  // ALWAYS do this last!!
  object_space()->initialize(new_memregion,
                             SpaceDecorator::DontClear,
                             SpaceDecorator::DontMangle);

  assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
         "Sanity");
}

size_t PSOldGen::gen_size_limit() {
  return _max_gen_size;
}

void PSOldGen::reset_after_change() {
  ShouldNotReachHere();
  return;
}

size_t PSOldGen::available_for_expansion() {
  ShouldNotReachHere();
  return 0;
}

size_t PSOldGen::available_for_contraction() {
  ShouldNotReachHere();
  return 0;
}

void PSOldGen::print() const { print_on(tty); }

void PSOldGen::print_on(outputStream* st) const {
  st->print(" %-15s", name());
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
            capacity_in_bytes()/K, used_in_bytes()/K);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               p2i(virtual_space()->low_boundary()),
               p2i(virtual_space()->high()),
               p2i(virtual_space()->high_boundary()));

  st->print(" object"); object_space()->print_on(st);
}

void PSOldGen::print_used_change(size_t prev_used) const {
  log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
                     name(), prev_used / K, used_in_bytes() / K, capacity_in_bytes() / K);
}

void PSOldGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

#ifndef PRODUCT

void PSOldGen::space_invariants() {
  assert(object_space()->end() == (HeapWord*) virtual_space()->high(),
         "Space invariant");
  assert(object_space()->bottom() == (HeapWord*) virtual_space()->low(),
         "Space invariant");
  assert(virtual_space()->low_boundary() <= virtual_space()->low(),
         "Space invariant");
  assert(virtual_space()->high_boundary() >= virtual_space()->high(),
         "Space invariant");
  assert(virtual_space()->low_boundary() == (char*) _reserved.start(),
         "Space invariant");
  assert(virtual_space()->high_boundary() == (char*) _reserved.end(),
         "Space invariant");
  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
         "Space invariant");
}
#endif

void PSOldGen::verify() {
  object_space()->verify();
}

class VerifyObjectStartArrayClosure : public ObjectClosure {
  PSOldGen* _old_gen;
  ObjectStartArray* _start_array;

 public:
  VerifyObjectStartArrayClosure(PSOldGen* old_gen, ObjectStartArray* start_array) :
    _old_gen(old_gen), _start_array(start_array) { }

  virtual void do_object(oop obj) {
    HeapWord* test_addr = (HeapWord*)obj + 1;
    guarantee(_start_array->object_start(test_addr) == (HeapWord*)obj, "ObjectStartArray cannot find start of object");
    guarantee(_start_array->is_block_allocated((HeapWord*)obj), "ObjectStartArray missing block allocation");
  }
};

void PSOldGen::verify_object_start_array() {
  VerifyObjectStartArrayClosure check(this, &_start_array);
  object_iterate(&check);
}

#ifndef PRODUCT
void PSOldGen::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  object_space()->set_top_for_allocations();
}
#endif
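
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the upstream file: the core of resize()
// above is an overflow-checked "clamp, then align" computation of the new
// committed size.  The hypothetical stand-alone helper below restates that
// arithmetic in isolation, mirroring resize()'s order of operations (overflow
// check, clamp to [min, limit], align up).  It is kept under #if 0 so it does
// not alter this compilation unit.
#if 0
static size_t sketch_new_committed_size(size_t used, size_t desired_free,
                                        size_t min_size, size_t limit,
                                        size_t alignment) {
  size_t new_size = used + desired_free;
  if (new_size < used) {
    // The addition overflowed; fall back to the hard upper limit.
    new_size = limit;
  }
  // Clamp to [min_size, limit], then round up to a whole alignment unit,
  // just as PSOldGen::resize() does before choosing expand() or shrink().
  new_size = MAX2(MIN2(new_size, limit), min_size);
  return align_size_up(new_size, alignment);
}
#endif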