/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "memory/allocation.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahConnectionMatrix.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"

// Lock guarding _mem_protection_level in the debug-only mprotect-based
// from-space access verification below. Special rank, never safepoint-checks.
Monitor ShenandoahHeapRegion::_mem_protect_lock(Mutex::special, "ShenandoahMemProtect_lock", true, Monitor::_safepoint_check_never);

// Global region geometry; computed once in setup_heap_region_size().
size_t ShenandoahHeapRegion::RegionSizeShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;

// Construct a region covering [start, start + regionSizeWords) with the given
// region index, and initialize the underlying ContiguousSpace over it.
ShenandoahHeapRegion::ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start,
                                           size_t regionSizeWords, size_t index) :
#ifdef ASSERT
  _mem_protection_level(0),
#endif
  _heap(heap),
  _region_number(index),
  _live_data(0),
  reserved(MemRegion(start, regionSizeWords)),
  _humongous_start(false),
  _humongous_continuation(false),
  _recycled(true),
  _root(false),
  _new_top(NULL),
  _critical_pins(0) {

  ContiguousSpace::initialize(reserved, true, false);
}

size_t ShenandoahHeapRegion::region_number() const {
  return _region_number;
}

// Undo the most recent allocation of 'size' words by moving top back.
// Always reports success.
bool ShenandoahHeapRegion::rollback_allocation(uint size) {
  set_top(top() - size);
  return true;
}

// Reset the live-data counter. Plain store; only the VM thread may do this
// (enforced by the assert), so no atomics are needed here.
void ShenandoahHeapRegion::clear_live_data() {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = 0;
}

void ShenandoahHeapRegion::set_recently_allocated(bool value) {
  _recycled = value;
}

// A region counts as recently allocated only if it was recycled AND has
// actually been allocated into since (used() > 0).
bool ShenandoahHeapRegion::is_recently_allocated() const {
  return _recycled && used() > 0;
}

// Record live data. 's' is in bytes; the counter is kept in heap words
// (note the division by HeapWordSize) and truncated to jint.
void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (jint) (s / HeapWordSize);
}

// Acquire-load of the live-data counter (in heap words); may be read
// concurrently with the VM-thread updates above.
size_t ShenandoahHeapRegion::get_live_data_words() const {
  return (size_t)OrderAccess::load_acquire((volatile jint*)&_live_data);
}

size_t ShenandoahHeapRegion::get_live_data_bytes() const {
  return get_live_data_words() * HeapWordSize;
}

bool ShenandoahHeapRegion::has_live() const {
  return get_live_data_words() != 0;
}

// Garbage = used() - live bytes. For humongous regions the invariant
// used() >= live may not hold per-region, hence the is_humongous() escape
// in the assert.
size_t ShenandoahHeapRegion::garbage() const {
  assert(used() >= get_live_data_bytes() || is_humongous(), "Live Data must be a subset of used() live: "SIZE_FORMAT" used: "SIZE_FORMAT,
         get_live_data_bytes(), used());
  size_t result = used() - get_live_data_bytes();
  return result;
}

// Collection-set membership is tracked centrally in the heap, keyed by
// region number, not in the region itself.
bool ShenandoahHeapRegion::in_collection_set() const {
  return _heap->region_in_collection_set(_region_number);
}

void ShenandoahHeapRegion::set_in_collection_set(bool b) {
  assert(! (is_humongous() && b), "never ever enter a humongous region into the collection set");

  _heap->set_region_in_collection_set(_region_number, b);

#ifdef ASSERT
  // Debug-only from-space access verification: regions entering the
  // collection set get mprotect()ed so stray reads/writes trap.
  if (ShenandoahVerifyWritesToFromSpace || ShenandoahVerifyReadsToFromSpace) {
    if (b) {
      memProtectionOn();
      assert(_mem_protection_level == 0, "need to be protected here");
    } else {
      assert(_mem_protection_level == 0, "need to be protected here");
      memProtectionOff();
    }
  }
#endif
}

#include <sys/mman.h>

#ifdef ASSERT

// NOTE(review): _mem_protection_level appears to count nested "protection
// off" requests: memProtectionOff() increments it (unprotecting on the 0->1
// transition) and memProtectionOn() decrements it (re-protecting on the
// 1->0 transition). Protection is active exactly when the level is 0.
void ShenandoahHeapRegion::memProtectionOn() {
  /*
  log_develop_trace(gc)("Protect memory on region level: "INT32_FORMAT, _mem_protection_level);
  print(tty);
  */
  MutexLockerEx ml(&_mem_protect_lock, true);
  assert(_mem_protection_level >= 1, "invariant");

  if (--_mem_protection_level == 0) {
    if (ShenandoahVerifyWritesToFromSpace) {
      assert(! ShenandoahVerifyReadsToFromSpace, "can't verify from-space reads when verifying from-space writes");
      // Writes should trap, reads are still allowed.
      os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_READ);
    } else {
      assert(ShenandoahVerifyReadsToFromSpace, "need to be verifying reads here");
      // Any access should trap.
      os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_NONE);
    }
  }
}

void ShenandoahHeapRegion::memProtectionOff() {
  /*
  tty->print_cr("unprotect memory on region level: "INT32_FORMAT, _mem_protection_level);
  print(tty);
  */
  MutexLockerEx ml(&_mem_protect_lock, true);
  assert(_mem_protection_level >= 0, "invariant");
  if (_mem_protection_level++ == 0) {
    os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_RW);
  }
}

#endif

void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("ShenandoahHeapRegion: "PTR_FORMAT"/"SIZE_FORMAT, p2i(this), _region_number);

  // State flags: C = in collection set, H = humongous start,
  // h = humongous continuation.
  if (in_collection_set())
    st->print("C");
  if (is_humongous_start()) {
    st->print("H");
  }
  if (is_humongous_continuation()) {
    st->print("h");
  }
  //else
  st->print(" ");

  st->print_cr("live = "SIZE_FORMAT" garbage = "SIZE_FORMAT" bottom = "PTR_FORMAT" end = "PTR_FORMAT" top = "PTR_FORMAT,
               get_live_data_bytes(), garbage(), p2i(bottom()), p2i(end()), p2i(top()));
}

// Iterate objects in this region, skipping the Brooks forwarding-pointer
// word that precedes each object. If allow_cancel is set, stop early when
// concurrent GC has been cancelled.
void ShenandoahHeapRegion::object_iterate_interruptible(ObjectClosure* blk, bool allow_cancel) {
  HeapWord* p = bottom() + BrooksPointer::word_size();
  while (p < top() && !(allow_cancel && _heap->cancelled_concgc())) {
    blk->do_object(oop(p));
#ifdef ASSERT
    if (ShenandoahVerifyReadsToFromSpace) {
      // The region may be mprotect()ed; lift protection just long enough
      // to read the object's size.
      memProtectionOff();
      p += oop(p)->size() + BrooksPointer::word_size();
      memProtectionOn();
    } else {
      p += oop(p)->size() + BrooksPointer::word_size();
    }
#else
    p += oop(p)->size() + BrooksPointer::word_size();
#endif
  }
}

// Careful iteration up to the concurrent-iteration safe limit. Returns the
// address where the closure failed (do_object_careful returned 0), or NULL
// if the whole range was processed.
HeapWord* ShenandoahHeapRegion::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord * limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom() + BrooksPointer::word_size(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p; // failed at p
    } else {
      p += size + BrooksPointer::word_size();
    }
  }
  return NULL; // all done
}

// Apply the oop closure to all references in this region, dispatching on
// whether the region is part of a humongous object.
void ShenandoahHeapRegion::oop_iterate(ExtendedOopClosure* blk) {
  if (is_empty()) return;
  if (is_humongous()) {
    oop_iterate_humongous(blk);
  } else {
    oop_iterate_objects(blk);
  }
}

void ShenandoahHeapRegion::oop_iterate_objects(ExtendedOopClosure* blk) {
  assert(! is_humongous(), "no humongous region here");
  HeapWord* obj_addr = bottom() + BrooksPointer::word_size();
  HeapWord* t = top();
  // Could call objects iterate, but this is easier.
  while (obj_addr < t) {
    oop obj = oop(obj_addr);
    obj_addr += obj->oop_iterate_size(blk) + BrooksPointer::word_size();
  }
}

// For a humongous region, the object header lives in the humongous-start
// region; walk back to it, then iterate only the part of the object that
// overlaps this region's [bottom, top).
void ShenandoahHeapRegion::oop_iterate_humongous(ExtendedOopClosure* blk) {
  assert(is_humongous(), "only humongous region here");
  // Find head.
  ShenandoahHeapRegionSet* regions = _heap->regions();
  uint idx = region_number();
  ShenandoahHeapRegion* r = regions->get(idx);
  while (! r->is_humongous_start()) {
    idx--;
    r = regions->get(idx);
  }
  assert(r->is_humongous_start(), "need humongous head here");
  oop obj = oop(r->bottom() + BrooksPointer::word_size());
  obj->oop_iterate(blk, MemRegion(bottom(), top()));
}

// Fill the remaining free space of this region with a dummy object (preceded
// by its Brooks pointer word) so the region is fully parseable. Skipped when
// the leftover is too small to hold filler + minimal object.
void ShenandoahHeapRegion::fill_region() {
  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();

  if (free() > (BrooksPointer::word_size() + CollectedHeap::min_fill_size())) {
    HeapWord* filler = allocate(BrooksPointer::word_size());
    HeapWord* obj = allocate(end() - top());
    sh->fill_with_object(obj, end() - obj);
    BrooksPointer::initialize(oop(obj));
  }
}

void ShenandoahHeapRegion::set_humongous_start(bool start) {
  _humongous_start = start;
}

void ShenandoahHeapRegion::set_humongous_continuation(bool continuation) {
  _humongous_continuation = continuation;
}

bool ShenandoahHeapRegion::is_humongous() const {
  return _humongous_start || _humongous_continuation;
}

bool ShenandoahHeapRegion::is_humongous_start() const {
  return _humongous_start;
}

bool ShenandoahHeapRegion::is_humongous_continuation() const {
  return _humongous_continuation;
}

// Return this region to a clean, empty state: reset the space, clear live
// data and humongous/root flags, leave the collection set, and reset the
// complete-TAMS pointer and connection-matrix row for this region.
void ShenandoahHeapRegion::recycle() {
  ContiguousSpace::initialize(reserved, true, false);
  clear_live_data();
  _humongous_start = false;
  _humongous_continuation = false;
  _recycled = true;
  _root = false;
  set_in_collection_set(false);
  // Reset C-TAMS pointer to ensure size-based iteration, everything
  // in that regions is going to be new objects.
  _heap->set_complete_top_at_mark_start(bottom(), bottom());
  // We can only safely reset the C-TAMS pointer if the bitmap is clear for that region.
  assert(_heap->is_complete_bitmap_clear_range(bottom(), end()), "must be clear");
  _heap->connection_matrix()->clear_region(region_number());
}

// Find the start of the object (block) containing p by linearly walking
// object sizes from the bottom of the region. Addresses at or above top()
// map to top().
HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p ("PTR_FORMAT") not in space ["PTR_FORMAT", "PTR_FORMAT")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom() + BrooksPointer::word_size();
    HeapWord* cur = last;
    // Walk forward until cur passes p; 'last' then holds the start of the
    // object containing p.
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size() + BrooksPointer::word_size();
    }
    assert(oop(last)->is_oop(),
           PTR_FORMAT" should be an object start", p2i(last));
    return last;
  }
}

// Compute and validate the global region size from heap sizes and the
// Shenandoah*RegionSize flags, then publish RegionSizeShift/RegionSizeBytes.
// Exits the VM on inconsistent flag settings. Must be called exactly once.
void ShenandoahHeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  // Absolute minimums we should not ever break:
  static const size_t MIN_REGION_SIZE = 256*K;
  static const size_t MIN_NUM_REGIONS = 10;

  uintx region_size;
  if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) {
    // Ergonomic path: derive region size from the average heap size,
    // clamped to [ShenandoahMinRegionSize, ShenandoahMaxRegionSize].
    if (ShenandoahMinRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "K).",
                      initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahMinRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "K should not be lower than minimum region size (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K, MIN_REGION_SIZE/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("" SIZE_FORMAT "K should not be lower than TLAB size size (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K, MinTLABSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "K should not be lower than min region size (" SIZE_FORMAT "K).",
                      ShenandoahMaxRegionSize/K, MIN_REGION_SIZE/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Minimum (" SIZE_FORMAT "K) should be larger than maximum (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K, ShenandoahMaxRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
    }
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / ShenandoahTargetNumRegions,
                       ShenandoahMinRegionSize);

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2(ShenandoahMinRegionSize, region_size);
    region_size = MIN2(ShenandoahMaxRegionSize, region_size);

  } else {
    // Explicit -XX:ShenandoahHeapRegionSize: validate it against the heap
    // size and the min/max region size flags.
    if (ShenandoahHeapRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "K).",
                      initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahHeapRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    if (ShenandoahHeapRegionSize < ShenandoahMinRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "K) should be larger than min region size (" SIZE_FORMAT "K).",
                      ShenandoahHeapRegionSize/K, ShenandoahMinRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    if (ShenandoahHeapRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "K) should be lower than max region size (" SIZE_FORMAT "K).",
                      ShenandoahHeapRegionSize/K, ShenandoahMaxRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    region_size = ShenandoahHeapRegionSize;
  }

  // Make sure region size is at least one large page, if enabled.
  // Otherwise, mem-protecting one region may falsely protect the adjacent
  // regions too.
  if (UseLargePages) {
    region_size = MAX2(region_size, os::large_page_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now, set up the globals.
  guarantee(RegionSizeShift == 0, "we should only set it once");
  RegionSizeShift = region_size_log;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = (size_t)region_size;

  log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", RegionSizeBytes / M);
  log_info(gc, init)("Region size in bytes: "SIZE_FORMAT, RegionSizeBytes);
  log_info(gc, init)("Region size shift: "SIZE_FORMAT, RegionSizeShift);
  log_info(gc, init)("Initial number of regions: "SIZE_FORMAT, initial_heap_size / RegionSizeBytes);
  log_info(gc, init)("Maximum number of regions: "SIZE_FORMAT, max_heap_size / RegionSizeBytes);
}

CompactibleSpace* ShenandoahHeapRegion::next_compaction_space() const {
  return _heap->next_compaction_region(this);
}

void ShenandoahHeapRegion::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}

void ShenandoahHeapRegion::adjust_pointers() {
  // Check first is there is any work to do.
  if (used() == 0) {
    return;   // Nothing to do.
  }

  scan_and_adjust_pointers(this);
}

void ShenandoahHeapRegion::compact() {
  assert(!is_humongous(), "Shouldn't be compacting humongous regions");
  scan_and_compact(this);
}

// Critical pins (JNI critical sections etc.) are counted atomically and
// only manipulated outside safepoints.
void ShenandoahHeapRegion::pin() {
  assert(! SafepointSynchronize::is_at_safepoint(), "only outside safepoints");
  assert(_critical_pins >= 0, "sanity");
  Atomic::inc(&_critical_pins);
}

void ShenandoahHeapRegion::unpin() {
  assert(! SafepointSynchronize::is_at_safepoint(), "only outside safepoints");
  Atomic::dec(&_critical_pins);
  assert(_critical_pins >= 0, "sanity");
}

bool ShenandoahHeapRegion::is_pinned() {
  jint v = OrderAccess::load_acquire(&_critical_pins);
  assert(v >= 0, "sanity");
  return v > 0;
}