/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCardTable.hpp"
#include "gc/parallel/psFileBackedVirtualspace.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/spaceDecorator.inline.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/align.hpp"

PSOldGen::PSOldGen(ReservedSpace rs, size_t initial_size, size_t min_size,
                   size_t max_size, const char* perf_data_name, int level):
  _min_gen_size(min_size),
  _max_gen_size(max_size)
{
  initialize(rs, initial_size, GenAlignment, perf_data_name, level);
}

void PSOldGen::initialize(ReservedSpace rs, size_t initial_size, size_t alignment,
                          const char* perf_data_name, int level) {
  initialize_virtual_space(rs, initial_size, alignment);
  initialize_work(perf_data_name, level);

  // The old gen can grow to max_gen_size(). _reserved reflects only
  // the current maximum that can be committed.
  assert(_reserved.byte_size() <= max_gen_size(), "Consistency check");

  initialize_performance_counters(perf_data_name, level);
}

void PSOldGen::initialize_virtual_space(ReservedSpace rs,
                                        size_t initial_size,
                                        size_t alignment) {

  if (ParallelArguments::is_heterogeneous_heap()) {
    _virtual_space = new PSFileBackedVirtualSpace(rs, alignment, AllocateOldGenAt);
    if (!(static_cast<PSFileBackedVirtualSpace*>(_virtual_space))->initialize()) {
      vm_exit_during_initialization("Could not map space for PSOldGen at given AllocateOldGenAt path");
    }
  } else {
    _virtual_space = new PSVirtualSpace(rs, alignment);
  }
  if (!_virtual_space->expand_by(initial_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}

void PSOldGen::initialize_work(const char* perf_data_name, int level) {
  //
  // Basic memory initialization
  //

  MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(),
                           heap_word_size(max_gen_size()));
  assert(limit_reserved.byte_size() == max_gen_size(),
         "word vs bytes confusion");

  //
  // Object start stuff
  //

  start_array()->initialize(limit_reserved);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  //
  // Card table stuff
  //

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately rather than
    // waiting for the initialization of the space even though
    // mangling is related to spaces. Doing it here eliminates
    // the need to carry along information that a complete mangling
    // (bottom to end) needs to be done.
    SpaceMangler::mangle_region(cmr);
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSCardTable* ct = heap->card_table();
  ct->resize_covered_region(cmr);

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
  if (_reserved.end() != heap->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
  }

  //
  // ObjectSpace stuff
  //

  _object_space = new MutableSpace(virtual_space()->alignment());
  object_space()->initialize(cmr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle);

  // Update the start_array
  start_array()->set_covered_region(cmr);
}

void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) {
  // Generation Counters, generation 'level', 1 subspace
  _gen_counters = new PSGenerationCounters(perf_data_name, level, 1, min_gen_size(),
                                           max_gen_size(), virtual_space());
  _space_counters = new SpaceCounters(perf_data_name, 0,
                                      virtual_space()->reserved_size(),
                                      _object_space, _gen_counters);
}

// Assume that the generation has been allocated if its
// reserved size is not 0.
bool PSOldGen::is_allocated() {
  return virtual_space()->reserved_size() != 0;
}
// Allocation. We report all successful allocations to the size policy.
// Note that the perm gen does not use this method, and should not!
HeapWord* PSOldGen::allocate(size_t word_size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = allocate_noexpand(word_size);

  if (res == NULL) {
    res = expand_and_allocate(word_size);
  }

  // Allocations in the old generation need to be reported
  if (res != NULL) {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    heap->size_policy()->tenured_allocation(word_size * HeapWordSize);
  }

  return res;
}

HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::naked_sleep(GCExpandToAllocateDelayMillis);
  }
  return allocate_noexpand(word_size);
}

HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::naked_sleep(GCExpandToAllocateDelayMillis);
  }
  return cas_allocate_noexpand(word_size);
}

void PSOldGen::expand(size_t bytes) {
  if (bytes == 0) {
    return;
  }
  MutexLocker x(ExpandHeap_lock);
  const size_t alignment = virtual_space()->alignment();
  size_t aligned_bytes = align_up(bytes, alignment);
  size_t aligned_expand_bytes = align_up(MinHeapDeltaBytes, alignment);

  if (UseNUMA) {
    // With NUMA we use round-robin page allocation for the old gen. Expand by at least
    // providing a page per lgroup. Alignment is larger than or equal to the page size.
    aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
  }
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap. An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not. A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee. Align down to give a best effort. This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = align_down(bytes, alignment);
  }

  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = expand_by(aligned_expand_bytes);
  }
  if (!success) {
    success = expand_by(aligned_bytes);
  }
  if (!success) {
    success = expand_to_reserved();
  }

  if (success && GCLocker::is_active_and_needs_gc()) {
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
  }
}

bool PSOldGen::expand_by(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true; // That's what virtual_space()->expand_by(0) would return
  }
  bool result = virtual_space()->expand_by(bytes);
  if (result) {
    if (ZapUnusedHeapArea) {
      // We need to mangle the newly expanded area. The memregion spans
      // end -> new_end; we assume that top -> end is already mangled.
      // Do the mangling before post_resize() is called because
      // the space is available for allocation after post_resize().
      HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
      assert(object_space()->end() < virtual_space_high,
             "Should be true before post_resize()");
      MemRegion mangle_region(object_space()->end(), virtual_space_high);
      // Note that the object space has not yet been updated to
      // coincide with the new underlying virtual space.
      SpaceMangler::mangle_region(mangle_region);
    }
    post_resize();
    if (UsePerfData) {
      _space_counters->update_capacity();
      _gen_counters->update_all();
    }
  }

  if (result) {
    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    log_debug(gc)("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }

  return result;
}

bool PSOldGen::expand_to_reserved() {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  bool result = true;
  const size_t remaining_bytes = virtual_space()->uncommitted_size();
  if (remaining_bytes > 0) {
    result = expand_by(remaining_bytes);
    DEBUG_ONLY(if (!result) log_warning(gc)("grow to reserve failed"));
  }
  return result;
}

void PSOldGen::shrink(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  // Shrink by the aligned size, not the raw request, so that the committed
  // boundary stays aligned to the virtual space alignment.
  size_t size = align_down(bytes, virtual_space()->alignment());
  if (size > 0) {
    virtual_space()->shrink_by(size);
    post_resize();

    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size + size;
    log_debug(gc)("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, size/K, new_mem_size/K);
  }
}

void PSOldGen::resize(size_t desired_free_space) {
  const size_t alignment = virtual_space()->alignment();
  const size_t size_before = virtual_space()->committed_size();
  size_t new_size = used_in_bytes() + desired_free_space;
  if (new_size < used_in_bytes()) {
    // Overflowed the addition.
    new_size = max_gen_size();
  }
  // Adjust according to our min and max
  new_size = clamp(new_size, min_gen_size(), max_gen_size());

  assert(max_gen_size() >= reserved().byte_size(), "max new size problem?");
  new_size = align_up(new_size, alignment);

  const size_t current_size = capacity_in_bytes();

  log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: "
                      "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
                      " new size: " SIZE_FORMAT " current size " SIZE_FORMAT
                      " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
                      desired_free_space, used_in_bytes(), new_size, current_size,
                      max_gen_size(), min_gen_size());

  if (new_size == current_size) {
    // No change requested
    return;
  }
  if (new_size > current_size) {
    size_t change_bytes = new_size - current_size;
    expand(change_bytes);
  } else {
    size_t change_bytes = current_size - new_size;
    // shrink doesn't grab this lock, expand does. Is that right?
    MutexLocker x(ExpandHeap_lock);
    shrink(change_bytes);
  }

  log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: collection: %d (" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                      ParallelScavengeHeap::heap()->total_collections(),
                      size_before,
                      virtual_space()->committed_size());
}

// NOTE! We need to be careful about resizing. During a GC, multiple
// allocators may be active during heap expansion. If we allow the
// heap resizing to become visible before we have correctly resized
// all heap related data structures, we may cause program failures.
void PSOldGen::post_resize() {
  // First construct a memregion representing the new size
  MemRegion new_memregion((HeapWord*)virtual_space()->low(),
                          (HeapWord*)virtual_space()->high());
  size_t new_word_size = new_memregion.word_size();

  start_array()->set_covered_region(new_memregion);
  ParallelScavengeHeap::heap()->card_table()->resize_covered_region(new_memregion);

  // ALWAYS do this last!!
  object_space()->initialize(new_memregion,
                             SpaceDecorator::DontClear,
                             SpaceDecorator::DontMangle);

  assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
         "Sanity");
}

void PSOldGen::print() const { print_on(tty); }

void PSOldGen::print_on(outputStream* st) const {
  st->print(" %-15s", name());
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
            capacity_in_bytes()/K, used_in_bytes()/K);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               p2i(virtual_space()->low_boundary()),
               p2i(virtual_space()->high()),
               p2i(virtual_space()->high_boundary()));

  st->print(" object"); object_space()->print_on(st);
}

void PSOldGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

void PSOldGen::verify() {
  object_space()->verify();
}

class VerifyObjectStartArrayClosure : public ObjectClosure {
  PSOldGen* _old_gen;
  ObjectStartArray* _start_array;

 public:
  VerifyObjectStartArrayClosure(PSOldGen* old_gen, ObjectStartArray* start_array) :
    _old_gen(old_gen), _start_array(start_array) { }

  virtual void do_object(oop obj) {
    HeapWord* test_addr = cast_from_oop<HeapWord*>(obj) + 1;
    guarantee(_start_array->object_start(test_addr) == cast_from_oop<HeapWord*>(obj),
              "ObjectStartArray cannot find start of object");
    guarantee(_start_array->is_block_allocated(cast_from_oop<HeapWord*>(obj)),
              "ObjectStartArray missing block allocation");
  }
};

void PSOldGen::verify_object_start_array() {
  VerifyObjectStartArrayClosure check(this, &_start_array);
  object_iterate(&check);
}

#ifndef PRODUCT
void PSOldGen::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  object_space()->set_top_for_allocations();
}
#endif
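
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the VM sources): the wrap-around guard in
// PSOldGen::expand() exists because align_up() overflows to zero when the
// requested byte count is within one alignment unit of SIZE_MAX, and
// expand_by(0) would then report success without committing anything. A
// minimal standalone demonstration, assuming align_up()/align_down() use the
// usual power-of-two mask arithmetic of utilities/align.hpp:
//
//   #include <cstddef>
//   #include <cstdint>
//   #include <cstdio>
//
//   int main() {
//     const size_t alignment = 4096;        // any power of two
//     const size_t bytes = SIZE_MAX - 100;  // pathological expand request
//     // align_up wraps modulo 2^N and masks down to 0 here; expand() then
//     // falls back to align_down(bytes, alignment) as a best effort.
//     const size_t up   = (bytes + alignment - 1) & ~(alignment - 1); // 0
//     const size_t down = bytes & ~(alignment - 1);                   // huge
//     printf("align_up = %zu, align_down = %zu\n", up, down);
//     return 0;
//   }
// ---------------------------------------------------------------------------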