1 /* 2 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates. 3 * 4 * This code is free software; you can redistribute it and/or modify it 5 * under the terms of the GNU General Public License version 2 only, as 6 * published by the Free Software Foundation. 7 * 8 * This code is distributed in the hope that it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 11 * version 2 for more details (a copy is included in the LICENSE file that 12 * accompanied this code). 13 * 14 * You should have received a copy of the GNU General Public License version 15 * 2 along with this work; if not, write to the Free Software Foundation, 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 17 * 18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 19 * or visit www.oracle.com if you need additional information or have any 20 * questions. 
 *
 */

#include "memory/allocation.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahConnectionMatrix.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"

// Guards the debug-only mprotect()-based from-space access verification
// (see memProtectionOn/memProtectionOff below). Created as a "special"
// rank lock that never checks for safepoints.
Monitor ShenandoahHeapRegion::_mem_protect_lock(Mutex::special, "ShenandoahMemProtect_lock", true, Monitor::_safepoint_check_never);

// Global region geometry, computed once in setup_heap_region_size().
// Zero means "not yet initialized"; both are guarded by guarantee()s there.
size_t ShenandoahHeapRegion::RegionSizeShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;

// Construct a region covering [start, start + regionSizeWords) with the
// given region index. The region starts out empty ("recycled"), with no
// live data, not humongous, and with no critical (JNI) pins.
ShenandoahHeapRegion::ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start,
                                           size_t regionSizeWords, size_t index) :
#ifdef ASSERT
  _mem_protection_level(0),
#endif
  _heap(heap),
  _region_number(index),
  _live_data(0),
  reserved(MemRegion(start, regionSizeWords)),
  _humongous_start(false),
  _humongous_continuation(false),
  _recycled(true),
  _new_top(NULL),
  _critical_pins(0) {

  // Let the shared ContiguousSpace machinery set up bottom/top/end
  // (clear_space = true, mangle_space = false).
  ContiguousSpace::initialize(reserved, true, false);
}

// Index of this region within the heap's region array.
size_t ShenandoahHeapRegion::region_number() const {
  return _region_number;
}

// Undo the most recent allocation of 'size' words by moving top back.
// NOTE(review): always returns true; callers apparently ignore or rely on
// unconditional success — confirm before changing the signature.
bool ShenandoahHeapRegion::rollback_allocation(uint size) {
  set_top(top() - size);
  return true;
}

// Reset the live-data counter to zero. Only the VM thread may do this
// (writes are unsynchronized; readers use a load_acquire).
void ShenandoahHeapRegion::clear_live_data() {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = 0;
}

// Mark/unmark this region as recently allocated-into ("recycled" flag).
void ShenandoahHeapRegion::set_recently_allocated(bool value) {
  _recycled = value;
}

// A region counts as recently allocated only if it was recycled AND has
// actually been allocated into since (used() > 0).
bool ShenandoahHeapRegion::is_recently_allocated() const {
  return _recycled && used() > 0;
}

// Record 's' bytes of live data, stored internally in words as a jint.
// VM-thread-only, mirroring clear_live_data().
void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (jint) (s / HeapWordSize);
}

// Live data in words; acquire-load pairs with concurrent marking updates
// made elsewhere (the field is treated as volatile jint here).
size_t ShenandoahHeapRegion::get_live_data_words() const {
  return (size_t)OrderAccess::load_acquire((volatile jint*)&_live_data);
}

// Live data in bytes (words * HeapWordSize).
size_t ShenandoahHeapRegion::get_live_data_bytes() const {
  return get_live_data_words() * HeapWordSize;
}

// Does this region contain any marked-live data?
bool ShenandoahHeapRegion::has_live() const {
  return get_live_data_words() != 0;
}

// Garbage = used() - live bytes. For humongous regions the live accounting
// may exceed used() of an individual region, hence the is_humongous()
// escape hatch in the assert.
size_t ShenandoahHeapRegion::garbage() const {
  assert(used() >= get_live_data_bytes() || is_humongous(), "Live Data must be a subset of used() live: "SIZE_FORMAT" used: "SIZE_FORMAT,
         get_live_data_bytes(), used());
  size_t result = used() - get_live_data_bytes();
  return result;
}

// Collection-set membership is tracked centrally by the heap, keyed by
// region number — this is just a forwarding query.
bool ShenandoahHeapRegion::in_collection_set() const {
  return _heap->region_in_collection_set(_region_number);
}

// Enter/remove this region from the collection set. Humongous regions are
// never collected via the cset. In debug builds with from-space access
// verification enabled, entering the cset also flips the mprotect-based
// guard on the region's memory.
void ShenandoahHeapRegion::set_in_collection_set(bool b) {
  assert(! (is_humongous() && b), "never ever enter a humongous region into the collection set");

  _heap->set_region_in_collection_set(_region_number, b);

#ifdef ASSERT
  if (ShenandoahVerifyWritesToFromSpace || ShenandoahVerifyReadsToFromSpace) {
    if (b) {
      memProtectionOn();
      // NOTE(review): memProtectionOn() asserts _mem_protection_level >= 1
      // on entry and decrements it, so level 0 here means "fully
      // protected". The On/Off naming is inverted relative to the counter
      // direction — confirm against memProtectionOn/Off below before
      // touching this logic.
      assert(_mem_protection_level == 0, "need to be protected here");
    } else {
      assert(_mem_protection_level == 0, "need to be protected here");
      memProtectionOff();
    }
  }
#endif
}

#include <sys/mman.h>

#ifdef ASSERT

// Re-arm memory protection on this region (debug only). The counter
// counts outstanding "unprotect" scopes: when it drops back to zero the
// region is mprotect()ed again — read-only when verifying writes, fully
// inaccessible when verifying reads.
void ShenandoahHeapRegion::memProtectionOn() {
  /*
  log_develop_trace(gc)("Protect memory on region level: "INT32_FORMAT, _mem_protection_level);
  print(tty);
  */
  MutexLockerEx ml(&_mem_protect_lock, true);
  assert(_mem_protection_level >= 1, "invariant");

  if (--_mem_protection_level == 0) {
    if (ShenandoahVerifyWritesToFromSpace) {
      assert(! ShenandoahVerifyReadsToFromSpace, "can't verify from-space reads when verifying from-space writes");
      os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_READ);
    } else {
      assert(ShenandoahVerifyReadsToFromSpace, "need to be verifying reads here");
      os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_NONE);
    }
  }
}

// Temporarily lift memory protection (debug only). The first unprotect
// scope (counter transition 0 -> 1) makes the region read-write; nested
// calls just bump the counter.
void ShenandoahHeapRegion::memProtectionOff() {
  /*
  tty->print_cr("unprotect memory on region level: "INT32_FORMAT, _mem_protection_level);
  print(tty);
  */
  MutexLockerEx ml(&_mem_protect_lock, true);
  assert(_mem_protection_level >= 0, "invariant");
  if (_mem_protection_level++ == 0) {
    os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_RW);
  }
}

#endif

// One-line diagnostic dump: address/number, state flags (C = in cset,
// H = humongous start, h = humongous continuation), then live/garbage
// accounting and the bottom/end/top boundaries.
void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("ShenandoahHeapRegion: "PTR_FORMAT"/"SIZE_FORMAT, p2i(this), _region_number);

  if (in_collection_set())
    st->print("C");
  if (is_humongous_start()) {
    st->print("H");
  }
  if (is_humongous_continuation()) {
    st->print("h");
  }
  //else
  st->print(" ");

  st->print_cr("live = "SIZE_FORMAT" garbage = "SIZE_FORMAT" bottom = "PTR_FORMAT" end = "PTR_FORMAT" top = "PTR_FORMAT,
               get_live_data_bytes(), garbage(), p2i(bottom()), p2i(end()), p2i(top()));
}


// Walk all objects in [bottom, top), applying 'blk' to each. Objects are
// laid out with a Brooks forwarding pointer word in front of each one, so
// the walk starts one brooks-word past bottom and skips the brooks word
// between objects. If allow_cancel is set, the walk stops early when a
// concurrent GC cancellation is observed. In debug read-verification mode
// the size read must happen with protection lifted.
void ShenandoahHeapRegion::object_iterate_interruptible(ObjectClosure* blk, bool allow_cancel) {
  HeapWord* p = bottom() + BrooksPointer::word_size();
  while (p < top() && !(allow_cancel && _heap->cancelled_concgc())) {
    blk->do_object(oop(p));
#ifdef ASSERT
    if (ShenandoahVerifyReadsToFromSpace) {
      memProtectionOff();
      p += oop(p)->size() + BrooksPointer::word_size();
      memProtectionOn();
    } else {
      p += oop(p)->size() + BrooksPointer::word_size();
    }
#else
    p += oop(p)->size() + BrooksPointer::word_size();
#endif
  }
}

// Careful variant of object iteration, bounded by the concurrent-iteration
// safe limit rather than top(). The closure reports each object's size; a
// reported size of 0 aborts the walk and the failing address is returned.
// Returns NULL when the whole range was walked successfully.
HeapWord* ShenandoahHeapRegion::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord * limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom() + BrooksPointer::word_size(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p; // failed at p
    } else {
      p += size + BrooksPointer::word_size();
    }
  }
  return NULL; // all done
}

// Fill the remaining free space of this region with a single filler
// object so the region parses as fully allocated. Only done when there is
// room for a brooks pointer plus a minimal filler object.
void ShenandoahHeapRegion::fill_region() {
  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();

  if (free() > (BrooksPointer::word_size() + CollectedHeap::min_fill_size())) {
    // 'filler' reserves the brooks-pointer slot in front of the filler
    // object; the returned address itself is intentionally unused.
    HeapWord* filler = allocate(BrooksPointer::word_size());
    HeapWord* obj = allocate(end() - top());
    sh->fill_with_object(obj, end() - obj);
    BrooksPointer::initialize(oop(obj));
  }
}

// Flag this region as the first region of a humongous (multi-region) object.
void ShenandoahHeapRegion::set_humongous_start(bool start) {
  _humongous_start = start;
}

// Flag this region as a continuation region of a humongous object.
void ShenandoahHeapRegion::set_humongous_continuation(bool continuation) {
  _humongous_continuation = continuation;
}

// True if this region is any part of a humongous object.
bool ShenandoahHeapRegion::is_humongous() const {
  return _humongous_start || _humongous_continuation;
}

bool ShenandoahHeapRegion::is_humongous_start() const {
  return _humongous_start;
}

bool ShenandoahHeapRegion::is_humongous_continuation() const {
  return _humongous_continuation;
}

// Return this region to the empty, allocatable state: reset the space
// boundaries, clear live accounting and humongous flags, drop it from the
// collection set, reset the complete-marking TAMS to bottom, and wipe its
// row in the connection matrix.
void ShenandoahHeapRegion::recycle() {
  ContiguousSpace::initialize(reserved, true, false);
  clear_live_data();
  _humongous_start = false;
  _humongous_continuation = false;
  _recycled = true;
  set_in_collection_set(false);
  // Reset C-TAMS pointer to ensure size-based iteration, everything
  // in that regions is going to be new objects.
  _heap->set_complete_top_at_mark_start(bottom(), bottom());
  // We can only safely reset the C-TAMS pointer if the bitmap is clear for that region.
  assert(_heap->is_complete_bitmap_clear_range(bottom(), end()), "must be clear");
  _heap->connection_matrix()->clear_region(region_number());
}

// Find the start of the object containing (or preceding) address p by a
// linear scan from the first object in the region, hopping object-by-object
// via object sizes plus the brooks-pointer gap. Addresses at or above top()
// map to top(). O(number of objects before p) — acceptable for the debug /
// verification paths that use block_start.
HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p ("PTR_FORMAT") not in space ["PTR_FORMAT", "PTR_FORMAT")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom() + BrooksPointer::word_size();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size() + BrooksPointer::word_size();
    }
    assert(oop(last)->is_oop(),
           PTR_FORMAT" should be an object start", p2i(last));
    return last;
  }
}

// One-time computation of the global region size from the heap bounds and
// the Shenandoah*RegionSize / ShenandoahTargetNumRegions flags. Validates
// flag combinations (exiting the VM on invalid ones), rounds the result
// down to a power of two, aligns it up to the large-page size when large
// pages are in use, and publishes RegionSizeShift / RegionSizeBytes.
void ShenandoahHeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  // Absolute minimums we should not ever break:
  static const size_t MIN_REGION_SIZE = 256*K;
  static const size_t MIN_NUM_REGIONS = 10;

  uintx region_size;
  if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) {
    // Region size not set explicitly: derive it from the average heap
    // size and the target region count, clamped to [min, max].
    if (ShenandoahMinRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "K).",
                      initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahMinRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "K should not be lower than minimum region size (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K, MIN_REGION_SIZE/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("" SIZE_FORMAT "K should not be lower than TLAB size size (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K, MinTLABSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "K should not be lower than min region size (" SIZE_FORMAT "K).",
                      ShenandoahMaxRegionSize/K, MIN_REGION_SIZE/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Minimum (" SIZE_FORMAT "K) should be larger than maximum (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K, ShenandoahMaxRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
    }
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / ShenandoahTargetNumRegions,
                       ShenandoahMinRegionSize);

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2(ShenandoahMinRegionSize, region_size);
    region_size = MIN2(ShenandoahMaxRegionSize, region_size);

  } else {
    // Explicit -XX:ShenandoahHeapRegionSize: validate against the heap
    // size and the min/max region bounds, then use it as-is.
    if (ShenandoahHeapRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "K).",
                      initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahHeapRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    if (ShenandoahHeapRegionSize < ShenandoahMinRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "K) should be larger than min region size (" SIZE_FORMAT "K).",
                      ShenandoahHeapRegionSize/K, ShenandoahMinRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    if (ShenandoahHeapRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "K) should be lower than max region size (" SIZE_FORMAT "K).",
                      ShenandoahHeapRegionSize/K, ShenandoahMaxRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    region_size = ShenandoahHeapRegionSize;
  }

  // Make sure region size is at least one large page, if enabled.
  // Otherwise, mem-protecting one region may falsely protect the adjacent
  // regions too.
  if (UseLargePages) {
    region_size = MAX2(region_size, os::large_page_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now, set up the globals.
  guarantee(RegionSizeShift == 0, "we should only set it once");
  RegionSizeShift = region_size_log;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = (size_t)region_size;

  log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", RegionSizeBytes / M);
  log_info(gc, init)("Region size in bytes: "SIZE_FORMAT, RegionSizeBytes);
  log_info(gc, init)("Region size shift: "SIZE_FORMAT, RegionSizeShift);
  log_info(gc, init)("Initial number of regions: "SIZE_FORMAT, initial_heap_size / RegionSizeBytes);
  log_info(gc, init)("Maximum number of regions: "SIZE_FORMAT, max_heap_size / RegionSizeBytes);
}

// The heap decides compaction order across regions.
CompactibleSpace* ShenandoahHeapRegion::next_compaction_space() const {
  return _heap->next_compaction_region(this);
}

// Mark-compact phase 2: compute forwarding addresses for live objects.
void ShenandoahHeapRegion::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}

// Mark-compact phase 3: update references to point at forwarded locations.
void ShenandoahHeapRegion::adjust_pointers() {
  // Check first is there is any work to do.
  if (used() == 0) {
    return;   // Nothing to do.
  }

  scan_and_adjust_pointers(this);
}

// Mark-compact phase 4: slide live objects to their new locations.
// Humongous regions are moved as a whole elsewhere, never compacted here.
void ShenandoahHeapRegion::compact() {
  assert(!is_humongous(), "Shouldn't be compacting humongous regions");
  scan_and_compact(this);
}

// Critical-section pinning (e.g. JNI GetPrimitiveArrayCritical): a pinned
// region must not be evacuated. Pin/unpin happen outside safepoints and
// use atomic counters; is_pinned() reads with acquire semantics.
void ShenandoahHeapRegion::pin() {
  assert(! SafepointSynchronize::is_at_safepoint(), "only outside safepoints");
  assert(_critical_pins >= 0, "sanity");
  Atomic::inc(&_critical_pins);
}

void ShenandoahHeapRegion::unpin() {
  assert(! SafepointSynchronize::is_at_safepoint(), "only outside safepoints");
  Atomic::dec(&_critical_pins);
  assert(_critical_pins >= 0, "sanity");
}

bool ShenandoahHeapRegion::is_pinned() {
  jint v = OrderAccess::load_acquire(&_critical_pins);
  assert(v >= 0, "sanity");
  return v > 0;
}