1 /* 2 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates. 3 * 4 * This code is free software; you can redistribute it and/or modify it 5 * under the terms of the GNU General Public License version 2 only, as 6 * published by the Free Software Foundation. 7 * 8 * This code is distributed in the hope that it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 11 * version 2 for more details (a copy is included in the LICENSE file that 12 * accompanied this code). 13 * 14 * You should have received a copy of the GNU General Public License version 15 * 2 along with this work; if not, write to the Free Software Foundation, 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 17 * 18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 19 * or visit www.oracle.com if you need additional information or have any 20 * questions. 
 *
 */

#include "memory/allocation.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"

// Serializes the mprotect()-based from-space access verification performed by
// memProtectionOn()/memProtectionOff() (ASSERT builds only).
Monitor ShenandoahHeapRegion::_mem_protect_lock(Mutex::special, "ShenandoahMemProtect_lock", true, Monitor::_safepoint_check_never);

// Global region geometry. Both are computed exactly once by
// setup_heap_region_size(); the guarantees there enforce single assignment.
size_t ShenandoahHeapRegion::RegionSizeShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;

// Construct a region covering [start, start + regionSizeWords) with the given
// region index. The backing ContiguousSpace is (re)initialized as a cleared
// space; regions start out counted as recycled with no live data.
ShenandoahHeapRegion::ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start,
                                           size_t regionSizeWords, size_t index) :
#ifdef ASSERT
  _mem_protection_level(0),
#endif
  _heap(heap),
  _region_number(index),
  _live_data(0),
  reserved(MemRegion(start, regionSizeWords)),
  _humongous_start(false),
  _humongous_continuation(false),
  _recycled(true),
  _new_top(NULL),
  _critical_pins(0) {

  ContiguousSpace::initialize(reserved, true, false);
}

// Index of this region within the heap's region table.
size_t ShenandoahHeapRegion::region_number() const {
  return _region_number;
}

// Undo the most recent allocation of 'size' words by moving top() back.
// Only meaningful if nothing has been allocated since; always reports success.
bool ShenandoahHeapRegion::rollback_allocation(uint size) {
  set_top(top() - size);
  return true;
}

// Reset the live-data counter to zero. Restricted to the VM thread, which is
// the only writer of _live_data (see set_live_data()).
void ShenandoahHeapRegion::clear_live_data() {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = 0;
}

void ShenandoahHeapRegion::set_recently_allocated(bool value) {
  _recycled = value;
}

// A region counts as recently allocated only if it carries the recycled flag
// AND actually holds data; an empty recycled region does not qualify.
bool ShenandoahHeapRegion::is_recently_allocated() const {
  return _recycled && used() > 0;
}

// Record the live-data amount. The argument is in bytes; the counter is kept
// in words (hence the division), truncated into a jint. VM thread only.
void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (jint) (s / HeapWordSize);
}

// Live data in words, read with acquire ordering so concurrent readers observe
// the value published by the VM-thread writer.
size_t ShenandoahHeapRegion::get_live_data_words() const {
  return (size_t)OrderAccess::load_acquire((volatile jint*)&_live_data);
}

size_t ShenandoahHeapRegion::get_live_data_bytes() const {
  return get_live_data_words() * HeapWordSize;
}

bool ShenandoahHeapRegion::has_live() const {
  return get_live_data_words() != 0;
}

// Garbage = used() minus live bytes. Humongous regions are exempt from the
// subset assert: their live accounting can exceed a single region's used().
size_t ShenandoahHeapRegion::garbage() const {
  assert(used() >= get_live_data_bytes() || is_humongous(), "Live Data must be a subset of used() live: "SIZE_FORMAT" used: "SIZE_FORMAT,
         get_live_data_bytes(), used());
  size_t result = used() - get_live_data_bytes();
  return result;
}

bool ShenandoahHeapRegion::in_collection_set() const {
  return _heap->region_in_collection_set(_region_number);
}

// Add/remove this region to/from the collection set (delegated to the heap's
// region-set bitmap). With from-space verification enabled (ASSERT builds),
// membership also toggles page protection: cset membership implies
// _mem_protection_level == 0, i.e. the region's memory is protected.
void ShenandoahHeapRegion::set_in_collection_set(bool b) {
  assert(! (is_humongous() && b), "never ever enter a humongous region into the collection set");

  _heap->set_region_in_collection_set(_region_number, b);

#ifdef ASSERT
  if (ShenandoahVerifyWritesToFromSpace || ShenandoahVerifyReadsToFromSpace) {
    if (b) {
      // Entering the cset: retire the outstanding unprotect request so the
      // level drops back to 0 (protected).
      memProtectionOn();
      assert(_mem_protection_level == 0, "need to be protected here");
    } else {
      // Leaving the cset: must currently be protected; then make it
      // accessible again.
      assert(_mem_protection_level == 0, "need to be protected here");
      memProtectionOff();
    }
  }
#endif
}

#include <sys/mman.h>

#ifdef ASSERT

// _mem_protection_level counts outstanding "unprotect" requests under
// _mem_protect_lock: 0 means the region's pages are protected, > 0 means they
// are accessible. memProtectionOn() retires one request and re-applies page
// protection when the count returns to zero — read-only when verifying writes,
// no-access when verifying reads (the two modes are mutually exclusive).
void ShenandoahHeapRegion::memProtectionOn() {
  /*
  log_develop_trace(gc)("Protect memory on region level: "INT32_FORMAT, _mem_protection_level);
  print(tty);
  */
  MutexLockerEx ml(&_mem_protect_lock, true);
  assert(_mem_protection_level >= 1, "invariant");

  if (--_mem_protection_level == 0) {
    if (ShenandoahVerifyWritesToFromSpace) {
      assert(! ShenandoahVerifyReadsToFromSpace, "can't verify from-space reads when verifying from-space writes");
      os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_READ);
    } else {
      assert(ShenandoahVerifyReadsToFromSpace, "need to be verifying reads here");
      os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_NONE);
    }
  }
}

// Register one "unprotect" request; the first such request (level 0 -> 1)
// makes the region's pages read-write again.
void ShenandoahHeapRegion::memProtectionOff() {
  /*
  tty->print_cr("unprotect memory on region level: "INT32_FORMAT, _mem_protection_level);
  print(tty);
  */
  MutexLockerEx ml(&_mem_protect_lock, true);
  assert(_mem_protection_level >= 0, "invariant");
  if (_mem_protection_level++ == 0) {
    os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_RW);
  }
}

#endif

// One-line region summary: flag characters (C = in collection set,
// H = humongous start, h = humongous continuation), then liveness, garbage,
// and the bottom/end/top boundary pointers.
void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("ShenandoahHeapRegion: "PTR_FORMAT"/"SIZE_FORMAT, p2i(this), _region_number);

  if (in_collection_set())
    st->print("C");
  if (is_humongous_start()) {
    st->print("H");
  }
  if (is_humongous_continuation()) {
    st->print("h");
  }
  //else
  st->print(" ");

  st->print_cr("live = "SIZE_FORMAT" garbage = "SIZE_FORMAT" bottom = "PTR_FORMAT" end = "PTR_FORMAT" top = "PTR_FORMAT,
               get_live_data_bytes(), garbage(), p2i(bottom()), p2i(end()), p2i(top()));
}


// Apply 'blk' to every object in the region. Each object is preceded by a
// Brooks forwarding-pointer word, so the walk starts one word above bottom()
// and skips that word between objects. If allow_cancel is set, the walk stops
// early once a concurrent-GC cancellation is observed. When from-space read
// verification is active, the region must be unprotected around the size
// query (oop(p)->size() reads the object header).
void ShenandoahHeapRegion::object_iterate_interruptible(ObjectClosure* blk, bool allow_cancel) {
  HeapWord* p = bottom() + BrooksPointer::word_size();
  while (p < top() && !(allow_cancel && _heap->cancelled_concgc())) {
    blk->do_object(oop(p));
#ifdef ASSERT
    if (ShenandoahVerifyReadsToFromSpace) {
      memProtectionOff();
      p += oop(p)->size() + BrooksPointer::word_size();
      memProtectionOn();
    } else {
      p += oop(p)->size() + BrooksPointer::word_size();
    }
#else
    p += oop(p)->size() + BrooksPointer::word_size();
#endif
  }
}

// Careful iteration up to the concurrent-iteration safe limit. The closure
// returns the object size on success and 0 on failure. Returns the address at
// which the closure failed, or NULL when the whole range was walked.
HeapWord* ShenandoahHeapRegion::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord * limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom() + BrooksPointer::word_size(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p; // failed at p
    } else {
      p += size + BrooksPointer::word_size();
    }
  }
  return NULL; // all done
}

// Pad the remainder of the region with a filler object so it parses as a
// contiguous sequence of objects. The first allocation reserves the Brooks
// forwarding-pointer word that precedes the filler; the filler itself then
// occupies [obj, end()) and gets its forwarding pointer initialized.
// Regions with too little free space for fwd-ptr + minimal filler are left
// as-is.
void ShenandoahHeapRegion::fill_region() {
  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();

  if (free() > (BrooksPointer::word_size() + CollectedHeap::min_fill_size())) {
    // 'filler' reserves the forwarding-pointer slot; its value is unused.
    HeapWord* filler = allocate(BrooksPointer::word_size());
    HeapWord* obj = allocate(end() - top());
    sh->fill_with_object(obj, end() - obj);
    BrooksPointer::initialize(oop(obj));
  }
}

void ShenandoahHeapRegion::set_humongous_start(bool start) {
  _humongous_start = start;
}

void ShenandoahHeapRegion::set_humongous_continuation(bool continuation) {
  _humongous_continuation = continuation;
}

// A region is humongous if it is either the first or a continuation region of
// a humongous (multi-region) allocation.
bool ShenandoahHeapRegion::is_humongous() const {
  return _humongous_start || _humongous_continuation;
}

bool ShenandoahHeapRegion::is_humongous_start() const {
  return _humongous_start;
}

bool ShenandoahHeapRegion::is_humongous_continuation() const {
  return _humongous_continuation;
}

// Return this region to a clean, empty, reusable state: reset the space,
// clear liveness and humongous flags, drop it from the collection set, and
// pull the complete-marking TAMS pointer back to bottom().
void ShenandoahHeapRegion::recycle() {
  ContiguousSpace::initialize(reserved, true, false);
  clear_live_data();
  _humongous_start = false;
  _humongous_continuation = false;
  _recycled = true;
  set_in_collection_set(false);
  // Reset C-TAMS pointer to ensure size-based iteration, everything
  // in that regions is going to be new objects.
  _heap->set_complete_top_at_mark_start(bottom(), bottom());
  // We can only safely reset the C-TAMS pointer if the bitmap is clear for that region.
  assert(_heap->is_complete_bitmap_clear_range(bottom(), end()), "must be clear");
}

// Find the start of the object containing p by linearly walking object sizes
// from the bottom of the region (each object is preceded by a Brooks
// forwarding-pointer word). Addresses at or above top() map to top().
// O(number of objects below p) — acceptable for its debugging/space-API uses.
HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p ("PTR_FORMAT") not in space ["PTR_FORMAT", "PTR_FORMAT")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom() + BrooksPointer::word_size();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size() + BrooksPointer::word_size();
    }
    assert(oop(last)->is_oop(),
           PTR_FORMAT" should be an object start", p2i(last));
    return last;
  }
}

// Compute and publish the global region size (RegionSizeBytes/RegionSizeShift),
// either ergonomically from the average heap size and the target region count,
// or from an explicit -XX:ShenandoahHeapRegionSize. All related flags are
// validated; invalid combinations abort VM initialization. Called exactly once
// (enforced by the guarantees at the bottom).
void ShenandoahHeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  // Absolute minimums we should not ever break:
  static const size_t MIN_REGION_SIZE = 256*K;
  static const size_t MIN_NUM_REGIONS = 10;

  uintx region_size;
  if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) {
    // Ergonomic path: derive the size, but first sanity-check the min/max
    // region size flags it depends on.
    if (ShenandoahMinRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "K).",
                      initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahMinRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "K should not be lower than minimum region size (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K, MIN_REGION_SIZE/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      // NOTE(review): "TLAB size size" in the message below is a typo in the
      // user-visible string; left unchanged here (doc-only pass).
      err_msg message("" SIZE_FORMAT "K should not be lower than TLAB size size (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K, MinTLABSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "K should not be lower than min region size (" SIZE_FORMAT "K).",
                      ShenandoahMaxRegionSize/K, MIN_REGION_SIZE/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Minimum (" SIZE_FORMAT "K) should be larger than maximum (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K, ShenandoahMaxRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
    }
    // Size for the target number of regions at the average of initial and
    // max heap size, then clamp into [min, max].
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / ShenandoahTargetNumRegions,
                       ShenandoahMinRegionSize);

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2(ShenandoahMinRegionSize, region_size);
    region_size = MIN2(ShenandoahMaxRegionSize, region_size);

  } else {
    // Explicit path: take -XX:ShenandoahHeapRegionSize after validation.
    if (ShenandoahHeapRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "K).",
                      initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahHeapRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    if (ShenandoahHeapRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT " should not be lower than min region size (" SIZE_FORMAT "K).",
                      ShenandoahHeapRegionSize/K, MIN_REGION_SIZE/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    region_size = ShenandoahHeapRegionSize;
  }

  // Make sure region size is at least one large page, if enabled.
  // Otherwise, mem-protecting one region may falsely protect the adjacent
  // regions too.
  if (UseLargePages) {
    region_size = MAX2(region_size, os::large_page_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now, set up the globals.
  guarantee(RegionSizeShift == 0, "we should only set it once");
  RegionSizeShift = region_size_log;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = (size_t)region_size;

  log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", RegionSizeBytes / M);
  log_info(gc, init)("Region size in bytes: "SIZE_FORMAT, RegionSizeBytes);
  log_info(gc, init)("Region size shift: "SIZE_FORMAT, RegionSizeShift);
  log_info(gc, init)("Initial number of regions: "SIZE_FORMAT, initial_heap_size / RegionSizeBytes);
  log_info(gc, init)("Maximum number of regions: "SIZE_FORMAT, max_heap_size / RegionSizeBytes);
}

// Mark-compact support: the heap decides which region is compacted into next.
CompactibleSpace* ShenandoahHeapRegion::next_compaction_space() const {
  return _heap->next_compaction_region(this);
}

// Mark-compact: compute forwarding addresses for live objects in this region.
void ShenandoahHeapRegion::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}

// Mark-compact: update references in this region to point at forwarded
// locations. Empty regions are skipped entirely.
void ShenandoahHeapRegion::adjust_pointers() {
  // Check first is there is any work to do.
  if (used() == 0) {
    return; // Nothing to do.
  }

  scan_and_adjust_pointers(this);
}

// Mark-compact: slide live objects to their forwarded locations. Humongous
// regions are never compacted.
void ShenandoahHeapRegion::compact() {
  assert(!is_humongous(), "Shouldn't be compacting humongous regions");
  scan_and_compact(this);
}

// Raise the critical-pin count atomically. While pinned (count > 0), the
// region is presumably excluded from evacuation (e.g. JNI critical sections)
// — TODO confirm against callers. Pinning only happens outside safepoints.
void ShenandoahHeapRegion::pin() {
  assert(! SafepointSynchronize::is_at_safepoint(), "only outside safepoints");
  assert(_critical_pins >= 0, "sanity");
  Atomic::inc(&_critical_pins);
}

// Drop one critical pin; the count must never go negative.
void ShenandoahHeapRegion::unpin() {
  assert(! SafepointSynchronize::is_at_safepoint(), "only outside safepoints");
  Atomic::dec(&_critical_pins);
  assert(_critical_pins >= 0, "sanity");
}

// True while at least one critical pin is outstanding; acquire load pairs
// with the atomic updates in pin()/unpin().
bool ShenandoahHeapRegion::is_pinned() {
  jint v = OrderAccess::load_acquire(&_critical_pins);
  assert(v >= 0, "sanity");
  return v > 0;
}