/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"

inline const char* PSOldGen::select_name() {
  return UseParallelOldGC ? "ParOldGen" : "PSOldGen";
}

PSOldGen::PSOldGen(ReservedSpace rs, size_t alignment,
                   size_t initial_size, size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{
  initialize(rs, alignment, perf_data_name, level);
}

PSOldGen::PSOldGen(size_t initial_size,
                   size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{}

void PSOldGen::initialize(ReservedSpace rs, size_t alignment,
                          const char* perf_data_name, int level) {
  initialize_virtual_space(rs, alignment);
  initialize_work(perf_data_name, level);

  // The old gen can grow to gen_size_limit().  _reserved reflects only
  // the current maximum that can be committed.
  assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");

  initialize_performance_counters(perf_data_name, level);
}

void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {

  _virtual_space = new PSVirtualSpace(rs, alignment);
  if (!_virtual_space->expand_by(_init_gen_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}

void PSOldGen::initialize_work(const char* perf_data_name, int level) {
  //
  // Basic memory initialization
  //

  MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(),
                           heap_word_size(_max_gen_size));
  assert(limit_reserved.byte_size() == _max_gen_size,
         "word vs bytes confusion");
  //
  // Object start stuff
  //

  start_array()->initialize(limit_reserved);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  //
  // Card table stuff
  //

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately rather than
    // waiting for the initialization of the space even though
    // mangling is related to spaces.  Doing it here eliminates
    // the need to carry along information that a complete mangling
    // (bottom to end) needs to be done.
    SpaceMangler::mangle_region(cmr);
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  BarrierSet* bs = heap->barrier_set();

  bs->resize_covered_region(cmr);

  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
  if (_reserved.end() != heap->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
  }
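
  // A worked example of the card-alignment requirement, assuming the usual
  // 512-byte cards: if the old gen ended at an address that was not a
  // multiple of 512, its last card would also cover the first bytes of the
  // neighboring region, and clearing or dirtying that card while resizing
  // this generation would silently touch the neighbor's state.  Card-aligned
  // boundaries ensure every card belongs to exactly one generation.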

  //
  // ObjectSpace stuff
  //

  _object_space = new MutableSpace(virtual_space()->alignment());

  if (_object_space == NULL)
    vm_exit_during_initialization("Could not allocate an old gen space");

  object_space()->initialize(cmr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle);

  _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);

  if (_object_mark_sweep == NULL)
    vm_exit_during_initialization("Could not complete allocation of old generation");

  // Update the start_array
  start_array()->set_covered_region(cmr);
}

void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) {
  // Generation Counters, generation 'level', 1 subspace
  _gen_counters = new PSGenerationCounters(perf_data_name, level, 1, _min_gen_size,
                                           _max_gen_size, virtual_space());
  _space_counters = new SpaceCounters(perf_data_name, 0,
                                      virtual_space()->reserved_size(),
                                      _object_space, _gen_counters);
}

// Assume that the generation has been allocated if its
// reserved size is not 0.
bool PSOldGen::is_allocated() {
  return virtual_space()->reserved_size() != 0;
}

void PSOldGen::precompact() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Reset start array first.
  start_array()->reset();

  object_mark_sweep()->precompact();

  // Now compact the young gen
  heap->young_gen()->precompact();
}

void PSOldGen::adjust_pointers() {
  object_mark_sweep()->adjust_pointers();
}

void PSOldGen::compact() {
  object_mark_sweep()->compact(ZapUnusedHeapArea);
}

size_t PSOldGen::contiguous_available() const {
  return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
}

// Allocation.  We report all successful allocations to the size policy.
// Note that the perm gen does not use this method, and should not!
HeapWord* PSOldGen::allocate(size_t word_size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = allocate_noexpand(word_size);

  if (res == NULL) {
    res = expand_and_allocate(word_size);
  }

  // Allocations in the old generation need to be reported
  if (res != NULL) {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    heap->size_policy()->tenured_allocation(word_size * HeapWordSize);
  }

  return res;
}
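
// Slow-path allocation pattern used by allocate() above and by the CAS
// variant below: try a no-expand allocation first, and only on failure
// commit more of the reserved space and retry exactly once.
// GCExpandToAllocateDelayMillis adds an artificial delay between the
// expansion and the retry (it is zero by default), which helps widen
// races during testing.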

HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return allocate_noexpand(word_size);
}

HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return cas_allocate_noexpand(word_size);
}

void PSOldGen::expand(size_t bytes) {
  if (bytes == 0) {
    return;
  }
  MutexLocker x(ExpandHeap_lock);
  const size_t alignment = virtual_space()->alignment();
  size_t aligned_bytes = align_size_up(bytes, alignment);
  size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment);

  if (UseNUMA) {
    // With NUMA we use round-robin page allocation for the old gen.  Expand
    // by at least enough to provide a page per lgroup.  Alignment is larger
    // than or equal to the page size.
    aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
  }
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not.  A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee.  Align down to give a best effort.  This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = align_size_down(bytes, alignment);
  }

  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = expand_by(aligned_expand_bytes);
  }
  if (!success) {
    success = expand_by(aligned_bytes);
  }
  if (!success) {
    success = expand_to_reserved();
  }

  if (success && GCLocker::is_active_and_needs_gc()) {
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
  }
}
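
// A worked example of the wrap-around guarded against above: with a 64-bit
// size_t and alignment = 0x10000, a request of bytes = SIZE_MAX - 0x100
// makes align_size_up() overflow and produce 0.  expand_by(0) would then
// report success without committing anything, so expand() instead aligns
// down, yielding the largest aligned amount that can actually be requested.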

bool PSOldGen::expand_by(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what virtual_space()->expand_by(0) would return
  }
  bool result = virtual_space()->expand_by(bytes);
  if (result) {
    if (ZapUnusedHeapArea) {
      // We need to mangle the newly expanded area.  The memregion spans
      // end -> new_end; we assume that top -> end is already mangled.
      // Do the mangling before post_resize() is called because
      // the space is available for allocation after post_resize().
      HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
      assert(object_space()->end() < virtual_space_high,
             "Should be true before post_resize()");
      MemRegion mangle_region(object_space()->end(), virtual_space_high);
      // Note that the object space has not yet been updated to
      // coincide with the new underlying virtual space.
      SpaceMangler::mangle_region(mangle_region);
    }
    post_resize();
    if (UsePerfData) {
      _space_counters->update_capacity();
      _gen_counters->update_all();
    }
  }

  if (result) {
    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    log_debug(gc)("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }

  return result;
}

bool PSOldGen::expand_to_reserved() {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  bool result = true;
  const size_t remaining_bytes = virtual_space()->uncommitted_size();
  if (remaining_bytes > 0) {
    result = expand_by(remaining_bytes);
    DEBUG_ONLY(if (!result) warning("grow to reserve failed"));
  }
  return result;
}

void PSOldGen::shrink(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  size_t size = align_size_down(bytes, virtual_space()->alignment());
  if (size > 0) {
    // Shrink by the aligned-down amount computed above, matching the guard;
    // shrinking by the raw (possibly unaligned) "bytes" would be wrong.
    virtual_space()->shrink_by(size);
    post_resize();

    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size + size;
    log_debug(gc)("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, size/K, new_mem_size/K);
  }
}
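
// Locking note: expand() takes ExpandHeap_lock itself, while expand_by(),
// expand_to_reserved() and shrink() assert that the caller already holds it.
// This is why resize() below can call expand() directly but must wrap its
// shrink() call in a MutexLocker.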

void PSOldGen::resize(size_t desired_free_space) {
  const size_t alignment = virtual_space()->alignment();
  const size_t size_before = virtual_space()->committed_size();
  size_t new_size = used_in_bytes() + desired_free_space;
  if (new_size < used_in_bytes()) {
    // Overflowed the addition.
    new_size = gen_size_limit();
  }
  // Adjust according to our min and max
  new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size());

  assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?");
  new_size = align_size_up(new_size, alignment);

  const size_t current_size = capacity_in_bytes();

  log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: "
    "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
    " new size: " SIZE_FORMAT " current size " SIZE_FORMAT
    " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
    desired_free_space, used_in_bytes(), new_size, current_size,
    gen_size_limit(), min_gen_size());

  if (new_size == current_size) {
    // No change requested
    return;
  }
  if (new_size > current_size) {
    size_t change_bytes = new_size - current_size;
    expand(change_bytes);
  } else {
    size_t change_bytes = current_size - new_size;
    // shrink() expects ExpandHeap_lock to be held by the caller, while
    // expand() takes the lock itself, so only the shrink path locks here.
    MutexLocker x(ExpandHeap_lock);
    shrink(change_bytes);
  }

  log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: collection: %d (" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                      ParallelScavengeHeap::heap()->total_collections(),
                      size_before,
                      virtual_space()->committed_size());
}

// NOTE! We need to be careful about resizing.  During a GC, multiple
// allocators may be active during heap expansion.  If we allow the
// heap resizing to become visible before we have correctly resized
// all heap related data structures, we may cause program failures.
void PSOldGen::post_resize() {
  // First construct a memregion representing the new size
  MemRegion new_memregion((HeapWord*)virtual_space()->low(),
                          (HeapWord*)virtual_space()->high());
  size_t new_word_size = new_memregion.word_size();

  start_array()->set_covered_region(new_memregion);
  ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(new_memregion);

  // ALWAYS do this last!!
  object_space()->initialize(new_memregion,
                             SpaceDecorator::DontClear,
                             SpaceDecorator::DontMangle);

  assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
         "Sanity");
}

size_t PSOldGen::gen_size_limit() {
  return _max_gen_size;
}

void PSOldGen::reset_after_change() {
  ShouldNotReachHere();
  return;
}

size_t PSOldGen::available_for_expansion() {
  ShouldNotReachHere();
  return 0;
}

size_t PSOldGen::available_for_contraction() {
  ShouldNotReachHere();
  return 0;
}

void PSOldGen::print() const { print_on(tty); }

void PSOldGen::print_on(outputStream* st) const {
  st->print(" %-15s", name());
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
            capacity_in_bytes()/K, used_in_bytes()/K);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               p2i(virtual_space()->low_boundary()),
               p2i(virtual_space()->high()),
               p2i(virtual_space()->high_boundary()));

  st->print(" object"); object_space()->print_on(st);
}

void PSOldGen::print_used_change(size_t prev_used) const {
  log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
                     name(), prev_used / K, used_in_bytes() / K, capacity_in_bytes() / K);
}

void PSOldGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

#ifndef PRODUCT

void PSOldGen::space_invariants() {
  assert(object_space()->end() == (HeapWord*) virtual_space()->high(),
         "Space invariant");
  assert(object_space()->bottom() == (HeapWord*) virtual_space()->low(),
         "Space invariant");
  assert(virtual_space()->low_boundary() <= virtual_space()->low(),
         "Space invariant");
  assert(virtual_space()->high_boundary() >= virtual_space()->high(),
         "Space invariant");
  assert(virtual_space()->low_boundary() == (char*) _reserved.start(),
         "Space invariant");
  assert(virtual_space()->high_boundary() == (char*) _reserved.end(),
         "Space invariant");
  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
         "Space invariant");
}
#endif

void PSOldGen::verify() {
  object_space()->verify();
}
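
// Verification support for the ObjectStartArray.  The closure below probes
// the start array with an interior address (one heap word past each
// object's start) and checks that the lookup maps back to the object's own
// start, exercising the interior-pointer resolution that card scanning
// depends on.  It also checks that the object's start is recorded as an
// allocated block.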

class VerifyObjectStartArrayClosure : public ObjectClosure {
  PSOldGen* _old_gen;
  ObjectStartArray* _start_array;

 public:
  VerifyObjectStartArrayClosure(PSOldGen* old_gen, ObjectStartArray* start_array) :
    _old_gen(old_gen), _start_array(start_array) { }

  virtual void do_object(oop obj) {
    HeapWord* test_addr = (HeapWord*)obj + 1;
    guarantee(_start_array->object_start(test_addr) == (HeapWord*)obj, "ObjectStartArray cannot find start of object");
    guarantee(_start_array->is_block_allocated((HeapWord*)obj), "ObjectStartArray missing block allocation");
  }
};

void PSOldGen::verify_object_start_array() {
  VerifyObjectStartArrayClosure check(this, &_start_array);
  object_iterate(&check);
}

#ifndef PRODUCT
void PSOldGen::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  object_space()->set_top_for_allocations();
}
#endif