/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "memory/allocation.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahConnectionMatrix.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"

// Serializes the debug-only memory (un)protection of from-space regions
// (see memProtectionOn/memProtectionOff below).
Monitor ShenandoahHeapRegion::_mem_protect_lock(Mutex::special, "ShenandoahMemProtect_lock", true, Monitor::_safepoint_check_never);

// Region geometry; computed once in setup_heap_region_size() (guarded there
// by "set it once" guarantees) and read-only afterwards.
size_t ShenandoahHeapRegion::RegionSizeShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;

// A region is a ContiguousSpace over [start, start + regionSizeWords).
// Freshly constructed regions count as recycled (empty, nothing live).
ShenandoahHeapRegion::ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start,
                                           size_t regionSizeWords, size_t index) :
#ifdef ASSERT
  _mem_protection_level(0),
#endif
  _heap(heap),
  _region_number(index),
  _live_data(0),
  reserved(MemRegion(start, regionSizeWords)),
  _humongous_start(false),
  _humongous_continuation(false),
  _recycled(true),
  _new_top(NULL),
  _critical_pins(0) {

  // Second argument clears the space; third leaves mangling off.
  ContiguousSpace::initialize(reserved, true, false);
}

size_t ShenandoahHeapRegion::region_number() const {
  return _region_number;
}

// Undo a previous allocation of 'size' words by moving top back.
// Always reports success.
bool ShenandoahHeapRegion::rollback_allocation(uint size) {
  set_top(top() - size);
  return true;
}

void ShenandoahHeapRegion::clear_live_data() {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = 0;
}

void ShenandoahHeapRegion::set_recently_allocated(bool value) {
  _recycled = value;
}

// "Recently allocated" means the region was recycled and has since been
// allocated into (used() > 0).
bool ShenandoahHeapRegion::is_recently_allocated() const {
  return _recycled && used() > 0;
}

// 's' is in bytes; _live_data is stored in words.
// Plain store here; readers use load_acquire (see get_live_data_words).
void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (jint) (s / HeapWordSize);
}

size_t ShenandoahHeapRegion::get_live_data_words() const {
  return (size_t)OrderAccess::load_acquire((volatile jint*)&_live_data);
}

size_t ShenandoahHeapRegion::get_live_data_bytes() const {
  return get_live_data_words() * HeapWordSize;
}

bool ShenandoahHeapRegion::has_live() const {
  return get_live_data_words() != 0;
}

// Garbage = used() - live bytes. For humongous regions live data may
// exceed used() of an individual region, hence the relaxed assert.
size_t ShenandoahHeapRegion::garbage() const {
  assert(used() >= get_live_data_bytes() || is_humongous(), "Live Data must be a subset of used() live: "SIZE_FORMAT" used: "SIZE_FORMAT,
         get_live_data_bytes(), used());
  size_t result = used() - get_live_data_bytes();
  return result;
}

// Collection-set membership is tracked heap-side, keyed by region number.
bool ShenandoahHeapRegion::in_collection_set() const {
  return _heap->region_in_collection_set(_region_number);
}

void ShenandoahHeapRegion::set_in_collection_set(bool b) {
  assert(! (is_humongous() && b), "never ever enter a humongous region into the collection set");

  _heap->set_region_in_collection_set(_region_number, b);

#ifdef ASSERT
  // When verifying from-space access, entering the collection set arms the
  // mprotect-based trap for this region; leaving it disarms the trap.
  if (ShenandoahVerifyWritesToFromSpace || ShenandoahVerifyReadsToFromSpace) {
    if (b) {
      memProtectionOn();
      assert(_mem_protection_level == 0, "need to be protected here");
    } else {
      assert(_mem_protection_level == 0, "need to be protected here");
      memProtectionOff();
    }
  }
#endif
}

// NOTE(review): presumably needed for the page-protection calls that
// os::protect_memory makes below — confirm this include is still required.
#include <sys/mman.h>

#ifdef ASSERT

// _mem_protection_level counts outstanding "unprotect" requests:
// memProtectionOff() raises it (and unprotects at the 0 -> 1 transition),
// memProtectionOn() lowers it (and re-protects when it drops back to 0).
// The region's memory is protected exactly when the level is 0.

void ShenandoahHeapRegion::memProtectionOn() {
  /*
  log_develop_trace(gc)("Protect memory on region level: "INT32_FORMAT, _mem_protection_level);
  print(tty);
  */
  MutexLockerEx ml(&_mem_protect_lock, true);
  assert(_mem_protection_level >= 1, "invariant");

  if (--_mem_protection_level == 0) {
    if (ShenandoahVerifyWritesToFromSpace) {
      assert(! ShenandoahVerifyReadsToFromSpace, "can't verify from-space reads when verifying from-space writes");
      // Writes trap, reads still allowed.
      os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_READ);
    } else {
      assert(ShenandoahVerifyReadsToFromSpace, "need to be verifying reads here");
      // Both reads and writes trap.
      os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_NONE);
    }
  }
}

void ShenandoahHeapRegion::memProtectionOff() {
  /*
  tty->print_cr("unprotect memory on region level: "INT32_FORMAT, _mem_protection_level);
  print(tty);
  */
  MutexLockerEx ml(&_mem_protect_lock, true);
  assert(_mem_protection_level >= 0, "invariant");
  if (_mem_protection_level++ == 0) {
    os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_RW);
  }
}

#endif

// One-line region summary: address/number, state flags
// (C = in collection set, H = humongous start, h = humongous continuation),
// then liveness and boundary pointers.
void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("ShenandoahHeapRegion: "PTR_FORMAT"/"SIZE_FORMAT, p2i(this), _region_number);

  if (in_collection_set())
    st->print("C");
  if (is_humongous_start()) {
    st->print("H");
  }
  if (is_humongous_continuation()) {
    st->print("h");
  }
  //else
  st->print(" ");

  st->print_cr("live = "SIZE_FORMAT" garbage = "SIZE_FORMAT" bottom = "PTR_FORMAT" end = "PTR_FORMAT" top = "PTR_FORMAT,
               get_live_data_bytes(), garbage(), p2i(bottom()), p2i(end()), p2i(top()));
}


// Walk all objects in [bottom, top), skipping the Brooks forwarding-pointer
// word that precedes each object. If allow_cancel is set, stop early when a
// concurrent GC cancellation is observed.
void ShenandoahHeapRegion::object_iterate_interruptible(ObjectClosure* blk, bool allow_cancel) {
  HeapWord* p = bottom() + BrooksPointer::word_size();
  while (p < top() && !(allow_cancel && _heap->cancelled_concgc())) {
    blk->do_object(oop(p));
#ifdef ASSERT
    if (ShenandoahVerifyReadsToFromSpace) {
      // Reading oop(p)->size() would trap on a protected region; drop the
      // protection around the read.
      memProtectionOff();
      p += oop(p)->size() + BrooksPointer::word_size();
      memProtectionOn();
    } else {
      p += oop(p)->size() + BrooksPointer::word_size();
    }
#else
    p += oop(p)->size() + BrooksPointer::word_size();
#endif
  }
}

// Careful iteration up to the concurrent-iteration safe limit.
// Returns the address where the closure failed (returned size 0),
// or NULL when the whole range was processed.
HeapWord* ShenandoahHeapRegion::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord * limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom() + BrooksPointer::word_size(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p; // failed at p
    } else {
      p += size + BrooksPointer::word_size();
    }
  }
  return NULL; // all done
}

// Apply the oop closure to every object in the region, again stepping over
// the Brooks pointer word between objects.
void ShenandoahHeapRegion::oop_iterate(ExtendedOopClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom() + BrooksPointer::word_size();
  HeapWord* t = top();
  // Could call objects iterate, but this is easier.
  while (obj_addr < t) {
    oop obj = oop(obj_addr);
    obj_addr += obj->oop_iterate_size(blk) + BrooksPointer::word_size();
  }
}

// Fill the unused tail of the region with a dummy object (preceded by its
// own Brooks pointer word) so the region parses as a sequence of objects.
// Skipped when the remaining space is too small to hold filler + minimum
// fill object.
void ShenandoahHeapRegion::fill_region() {
  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();

  if (free() > (BrooksPointer::word_size() + CollectedHeap::min_fill_size())) {
    // 'filler' reserves the Brooks pointer slot for the dummy object; the
    // returned address itself is deliberately unused.
    HeapWord* filler = allocate(BrooksPointer::word_size());
    HeapWord* obj = allocate(end() - top());
    sh->fill_with_object(obj, end() - obj);
    BrooksPointer::initialize(oop(obj));
  }
}

void ShenandoahHeapRegion::set_humongous_start(bool start) {
  _humongous_start = start;
}

void ShenandoahHeapRegion::set_humongous_continuation(bool continuation) {
  _humongous_continuation = continuation;
}

bool ShenandoahHeapRegion::is_humongous() const {
  return _humongous_start || _humongous_continuation;
}

bool ShenandoahHeapRegion::is_humongous_start() const {
  return _humongous_start;
}

bool ShenandoahHeapRegion::is_humongous_continuation() const {
  return _humongous_continuation;
}

// Reset the region to a pristine, empty state: re-initialize the space,
// clear liveness and humongous flags, drop it from the collection set,
// reset the complete-TAMS pointer, and clear its connection-matrix row.
void ShenandoahHeapRegion::recycle() {
  ContiguousSpace::initialize(reserved, true, false);
  clear_live_data();
  _humongous_start = false;
  _humongous_continuation = false;
  _recycled = true;
  set_in_collection_set(false);
  // Reset C-TAMS pointer to ensure size-based iteration, everything
  // in that regions is going to be new objects.
  _heap->set_complete_top_at_mark_start(bottom(), bottom());
  // We can only safely reset the C-TAMS pointer if the bitmap is clear for that region.
  assert(_heap->is_complete_bitmap_clear_range(bottom(), end()), "must be clear");
  _heap->connection_matrix()->clear_region(region_number());
}

// Find the start of the object (block) containing address p by linearly
// walking objects from the bottom of the region. Addresses at or above
// top() map to top() itself.
HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p ("PTR_FORMAT") not in space ["PTR_FORMAT", "PTR_FORMAT")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom() + BrooksPointer::word_size();
    HeapWord* cur = last;
    // Advance until cur moves past p; 'last' then holds the start of the
    // object covering p.
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size() + BrooksPointer::word_size();
    }
    assert(oop(last)->is_oop(),
           PTR_FORMAT" should be an object start", p2i(last));
    return last;
  }
}

// Decide the region size once at startup, from either the explicit
// -XX:ShenandoahHeapRegionSize or the min/max/target-count ergonomics,
// then round it down to a power of two and publish RegionSizeShift /
// RegionSizeBytes. Exits the VM on inconsistent flag settings.
void ShenandoahHeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  // Absolute minimums we should not ever break:
  static const size_t MIN_REGION_SIZE = 256*K;
  static const size_t MIN_NUM_REGIONS = 10;

  uintx region_size;
  if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) {
    // Ergonomic path: validate the min/max flags, then target
    // ShenandoahTargetNumRegions regions for the average heap size.
    if (ShenandoahMinRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "K).",
                      initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahMinRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "K should not be lower than minimum region size (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K, MIN_REGION_SIZE/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("" SIZE_FORMAT "K should not be lower than TLAB size size (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K, MinTLABSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "K should not be lower than min region size (" SIZE_FORMAT "K).",
                      ShenandoahMaxRegionSize/K, MIN_REGION_SIZE/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Minimum (" SIZE_FORMAT "K) should be larger than maximum (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K, ShenandoahMaxRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
    }
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / ShenandoahTargetNumRegions,
                       ShenandoahMinRegionSize);

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2(ShenandoahMinRegionSize, region_size);
    region_size = MIN2(ShenandoahMaxRegionSize, region_size);

  } else {
    // Explicit path: validate the user-specified region size against the
    // heap size and the min/max bounds.
    if (ShenandoahHeapRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "K).",
                      initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahHeapRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    if (ShenandoahHeapRegionSize < ShenandoahMinRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "K) should be larger than min region size (" SIZE_FORMAT "K).",
                      ShenandoahHeapRegionSize/K, ShenandoahMinRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    if (ShenandoahHeapRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "K) should be lower than max region size (" SIZE_FORMAT "K).",
                      ShenandoahHeapRegionSize/K, ShenandoahMaxRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    region_size = ShenandoahHeapRegionSize;
  }

  // Make sure region size is at least one large page, if enabled.
  // Otherwise, mem-protecting one region may falsely protect the adjacent
  // regions too.
  if (UseLargePages) {
    region_size = MAX2(region_size, os::large_page_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now, set up the globals.
  guarantee(RegionSizeShift == 0, "we should only set it once");
  RegionSizeShift = region_size_log;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = (size_t)region_size;

  log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", RegionSizeBytes / M);
  log_info(gc, init)("Region size in bytes: "SIZE_FORMAT, RegionSizeBytes);
  log_info(gc, init)("Region size shift: "SIZE_FORMAT, RegionSizeShift);
  log_info(gc, init)("Initial number of regions: "SIZE_FORMAT, initial_heap_size / RegionSizeBytes);
  log_info(gc, init)("Maximum number of regions: "SIZE_FORMAT, max_heap_size / RegionSizeBytes);
}

// Mark-compact support: the heap decides which region is compacted next.
CompactibleSpace* ShenandoahHeapRegion::next_compaction_space() const {
  return _heap->next_compaction_region(this);
}

void ShenandoahHeapRegion::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}

void ShenandoahHeapRegion::adjust_pointers() {
  // Check first is there is any work to do.
  if (used() == 0) {
    return; // Nothing to do.
  }

  scan_and_adjust_pointers(this);
}

void ShenandoahHeapRegion::compact() {
  assert(!is_humongous(), "Shouldn't be compacting humongous regions");
  scan_and_compact(this);
}

// Critical-section pinning: a pinned region must not be moved by the GC.
// The pin count is maintained with atomic inc/dec outside safepoints and
// read with load_acquire in is_pinned().
void ShenandoahHeapRegion::pin() {
  assert(! SafepointSynchronize::is_at_safepoint(), "only outside safepoints");
  assert(_critical_pins >= 0, "sanity");
  Atomic::inc(&_critical_pins);
}

void ShenandoahHeapRegion::unpin() {
  assert(! SafepointSynchronize::is_at_safepoint(), "only outside safepoints");
  Atomic::dec(&_critical_pins);
  assert(_critical_pins >= 0, "sanity");
}

bool ShenandoahHeapRegion::is_pinned() {
  jint v = OrderAccess::load_acquire(&_critical_pins);
  assert(v >= 0, "sanity");
  return v > 0;
}