/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "memory/allocation.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"

// Guards the mprotect-based from-space verification (debug builds only);
// a "special" rank lock that never performs safepoint checks.
Monitor ShenandoahHeapRegion::_mem_protect_lock(Mutex::special, "ShenandoahMemProtect_lock", true, Monitor::_safepoint_check_never);

// Region geometry; computed once in setup_heap_region_size() and
// guaranteed to stay zero until then (see the guarantees there).
size_t ShenandoahHeapRegion::RegionSizeShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;

ShenandoahHeapRegion::ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start,
                                           size_t regionSizeWords, size_t index) :
#ifdef ASSERT
  _mem_protection_level(0),
#endif
  _heap(heap),
  _region_number(index),
  _live_data(0),
  reserved(MemRegion(start, regionSizeWords)),
  _humongous_start(false),
  _humongous_continuation(false),
  _recycled(true),
  _new_top(NULL),
  _critical_pins(0) {

  // Initialize the underlying ContiguousSpace over the reserved range
  // (clear_space = true, mangle_space = false).
  ContiguousSpace::initialize(reserved, true, false);
}

size_t ShenandoahHeapRegion::region_number() const {
  return _region_number;
}

// Undo the most recent allocation of 'size' words by moving top back.
// Only valid immediately after that allocation, while no other thread has
// allocated in this region. Always reports success.
bool ShenandoahHeapRegion::rollback_allocation(uint size) {
  set_top(top() - size);
  return true;
}

void ShenandoahHeapRegion::clear_live_data() {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = 0;
}

void ShenandoahHeapRegion::set_recently_allocated(bool value) {
  _recycled = value;
}

// A region counts as recently allocated only if it was recycled AND has
// actually been allocated into since.
bool ShenandoahHeapRegion::is_recently_allocated() const {
  return _recycled && used() > 0;
}

// Record live data, given in bytes; stored internally as a word count.
// NOTE(review): the jint cast truncates for regions whose live word count
// exceeds INT_MAX — presumably impossible given region size limits; verify.
void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (jint) (s / HeapWordSize);
}

// Live data in words. load_acquire pairs with the VM-thread writers above
// so concurrent readers see a consistent value.
size_t ShenandoahHeapRegion::get_live_data_words() const {
  return (size_t)OrderAccess::load_acquire((volatile jint*)&_live_data);
}

size_t ShenandoahHeapRegion::get_live_data_bytes() const {
  return get_live_data_words() * HeapWordSize;
}

bool ShenandoahHeapRegion::has_live() const {
  return get_live_data_words() != 0;
}

// Bytes of garbage: used minus live. The assert exempts humongous regions,
// where live accounting may exceed used().
// NOTE(review): for humongous regions the subtraction below can underflow;
// callers appear to skip humongous regions — confirm before relying on it.
size_t ShenandoahHeapRegion::garbage() const {
  assert(used() >= get_live_data_bytes() || is_humongous(), "Live Data must be a subset of used() live: "SIZE_FORMAT" used: "SIZE_FORMAT,
         get_live_data_bytes(), used());
  size_t result = used() - get_live_data_bytes();
  return result;
}

bool ShenandoahHeapRegion::in_collection_set() const {
  return _heap->region_in_collection_set(_region_number);
}

// Add/remove this region to/from the collection set. In debug builds with
// from-space verification enabled, entering the cset arms page protection
// so stray reads/writes into from-space trap.
void ShenandoahHeapRegion::set_in_collection_set(bool b) {
  assert(! (is_humongous() && b), "never ever enter a humongous region into the collection set");

  _heap->set_region_in_collection_set(_region_number, b);

#ifdef ASSERT
  if (ShenandoahVerifyWritesToFromSpace || ShenandoahVerifyReadsToFromSpace) {
    if (b) {
      memProtectionOn();
      assert(_mem_protection_level == 0, "need to be protected here");
    } else {
      assert(_mem_protection_level == 0, "need to be protected here");
      memProtectionOff();
    }
  }
#endif
}

#include <sys/mman.h>

#ifdef ASSERT

// _mem_protection_level counts outstanding "unprotect" requests: the pages
// are actually protected only while the counter is 0. memProtectionOn()
// retires one unprotect request; when the count drops to zero the region is
// re-protected (read-only when verifying writes, no-access when verifying
// reads).
void ShenandoahHeapRegion::memProtectionOn() {
  /*
  log_develop_trace(gc)("Protect memory on region level: "INT32_FORMAT, _mem_protection_level);
  print(tty);
  */
  MutexLockerEx ml(&_mem_protect_lock, true);
  assert(_mem_protection_level >= 1, "invariant");

  if (--_mem_protection_level == 0) {
    if (ShenandoahVerifyWritesToFromSpace) {
      assert(! ShenandoahVerifyReadsToFromSpace, "can't verify from-space reads when verifying from-space writes");
      os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_READ);
    } else {
      assert(ShenandoahVerifyReadsToFromSpace, "need to be verifying reads here");
      os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_NONE);
    }
  }
}

// Register one "unprotect" request; the first such request (counter going
// 0 -> 1) makes the region's pages read-write again.
void ShenandoahHeapRegion::memProtectionOff() {
  /*
  tty->print_cr("unprotect memory on region level: "INT32_FORMAT, _mem_protection_level);
  print(tty);
  */
  MutexLockerEx ml(&_mem_protect_lock, true);
  assert(_mem_protection_level >= 0, "invariant");
  if (_mem_protection_level++ == 0) {
    os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_RW);
  }
}

#endif

// One-line human-readable dump: flags (C = in cset, H = humongous start,
// h = humongous continuation), then live/garbage/bounds.
void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("ShenandoahHeapRegion: "PTR_FORMAT"/"SIZE_FORMAT, p2i(this), _region_number);

  if (in_collection_set())
    st->print("C");
  if (is_humongous_start()) {
    st->print("H");
  }
  if (is_humongous_continuation()) {
    st->print("h");
  }
  //else
  st->print(" ");

  st->print_cr("live = "SIZE_FORMAT" garbage = "SIZE_FORMAT" bottom = "PTR_FORMAT" end = "PTR_FORMAT" top = "PTR_FORMAT,
               get_live_data_bytes(), garbage(), p2i(bottom()), p2i(end()), p2i(top()));
}


// Walk all objects in [bottom, top). Each object is preceded by a Brooks
// forwarding pointer word, which is skipped. If allow_cancel is set the walk
// stops early once concurrent GC has been cancelled. With read verification
// on, protection is dropped around the size() call so the walk itself
// doesn't trap.
void ShenandoahHeapRegion::object_iterate_interruptible(ObjectClosure* blk, bool allow_cancel) {
  HeapWord* p = bottom() + BrooksPointer::word_size();
  while (p < top() && !(allow_cancel && _heap->cancelled_concgc())) {
    blk->do_object(oop(p));
#ifdef ASSERT
    if (ShenandoahVerifyReadsToFromSpace) {
      memProtectionOff();
      p += oop(p)->size() + BrooksPointer::word_size();
      memProtectionOn();
    } else {
      p += oop(p)->size() + BrooksPointer::word_size();
    }
#else
    p += oop(p)->size() + BrooksPointer::word_size();
#endif
  }
}

// Careful iteration up to the concurrent-iteration safe limit. Returns the
// address where the closure gave up (do_object_careful returned 0), or NULL
// if the whole range was walked.
HeapWord* ShenandoahHeapRegion::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord * limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom() + BrooksPointer::word_size(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p; // failed at p
    } else {
      p += size + BrooksPointer::word_size();
    }
  }
  return NULL; // all done
}

// Fill the remaining free space with a single filler object so the region
// is fully parsable. The first allocation reserves the filler's Brooks
// forwarding pointer word; the rest becomes the filler object itself.
void ShenandoahHeapRegion::fill_region() {
  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();

  if (free() > (BrooksPointer::word_size() + CollectedHeap::min_fill_size())) {
    HeapWord* filler = allocate(BrooksPointer::word_size());
    HeapWord* obj = allocate(end() - top());
    sh->fill_with_object(obj, end() - obj);
    BrooksPointer::initialize(oop(obj));
  }
}

void ShenandoahHeapRegion::set_humongous_start(bool start) {
  _humongous_start = start;
}

void ShenandoahHeapRegion::set_humongous_continuation(bool continuation) {
  _humongous_continuation = continuation;
}

bool ShenandoahHeapRegion::is_humongous() const {
  return _humongous_start || _humongous_continuation;
}

bool ShenandoahHeapRegion::is_humongous_start() const {
  return _humongous_start;
}

bool ShenandoahHeapRegion::is_humongous_continuation() const {
  return _humongous_continuation;
}

// Reset the region to a pristine, empty state for reuse by the allocator.
void ShenandoahHeapRegion::recycle() {
  ContiguousSpace::initialize(reserved, true, false);
  clear_live_data();
  _humongous_start = false;
  _humongous_continuation = false;
  _recycled = true;
  set_in_collection_set(false);
  // Reset C-TAMS pointer to ensure size-based iteration, everything
  // in that regions is going to be new objects.
  _heap->set_complete_top_at_mark_start(bottom(), bottom());
  // We can only safely reset the C-TAMS pointer if the bitmap is clear for that region.
  assert(_heap->is_complete_bitmap_clear_range(bottom(), end()), "must be clear");
}

// Find the start of the object (block) containing p by linearly scanning
// object-by-object from the bottom, skipping each Brooks pointer word.
// Anything at or above top() is reported as top().
HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p ("PTR_FORMAT") not in space ["PTR_FORMAT", "PTR_FORMAT")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom() + BrooksPointer::word_size();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size() + BrooksPointer::word_size();
    }
    assert(oop(last)->is_oop(),
           PTR_FORMAT" should be an object start", p2i(last));
    return last;
  }
}

// Compute and publish the global region size (RegionSizeBytes/Shift) from
// the heap bounds and the Shenandoah region-size flags. Called exactly once
// during heap initialization; exits the VM on inconsistent flag settings.
void ShenandoahHeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  uintx region_size;
  if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) {
    if (ShenandoahMinRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option");
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option");
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option");
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option");
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize");
    }
    // Aim for ShenandoahTargetNumRegions regions at the average heap size.
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / ShenandoahTargetNumRegions,
                       ShenandoahMinRegionSize);

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2(ShenandoahMinRegionSize, region_size);
    region_size = MIN2(ShenandoahMaxRegionSize, region_size);

  } else {
    if (ShenandoahHeapRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option");
    }
    if (ShenandoahHeapRegionSize < MIN_REGION_SIZE) {
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option");
    }
    region_size = ShenandoahHeapRegionSize;
  }

  // Make sure region size is at least one large page, if enabled.
  // Otherwise, mem-protecting one region may falsely protect the adjacent
  // regions too.
  if (UseLargePages) {
    region_size = MAX2(region_size, os::large_page_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now, set up the globals.
  guarantee(RegionSizeShift == 0, "we should only set it once");
  RegionSizeShift = region_size_log;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = (size_t)region_size;

  log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", RegionSizeBytes / M);
  log_info(gc, init)("Region size in bytes: "SIZE_FORMAT, RegionSizeBytes);
  log_info(gc, init)("Region size shift: "SIZE_FORMAT, RegionSizeShift);
  log_info(gc, init)("Initial number of regions: "SIZE_FORMAT, initial_heap_size / RegionSizeBytes);
  log_info(gc, init)("Maximum number of regions: "SIZE_FORMAT, max_heap_size / RegionSizeBytes);
}

CompactibleSpace* ShenandoahHeapRegion::next_compaction_space() const {
  return _heap->next_compaction_region(this);
}

// Mark-compact phase 2: compute forwarding addresses for live objects.
void ShenandoahHeapRegion::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}

// Mark-compact phase 3: update all references to point at new locations.
void ShenandoahHeapRegion::adjust_pointers() {
  // Check first is there is any work to do.
  if (used() == 0) {
    return; // Nothing to do.
  }

  scan_and_adjust_pointers(this);
}

// Mark-compact phase 4: slide live objects to their new locations.
void ShenandoahHeapRegion::compact() {
  assert(!is_humongous(), "Shouldn't be compacting humongous regions");
  scan_and_compact(this);
}

// Critical-section pinning (e.g. JNI critical regions): a pinned region
// must not be evacuated. Pin count is maintained with atomics outside
// safepoints.
void ShenandoahHeapRegion::pin() {
  assert(! SafepointSynchronize::is_at_safepoint(), "only outside safepoints");
  assert(_critical_pins >= 0, "sanity");
  Atomic::inc(&_critical_pins);
}

void ShenandoahHeapRegion::unpin() {
  assert(! SafepointSynchronize::is_at_safepoint(), "only outside safepoints");
  Atomic::dec(&_critical_pins);
  assert(_critical_pins >= 0, "sanity");
}

bool ShenandoahHeapRegion::is_pinned() {
  jint v = OrderAccess::load_acquire(&_critical_pins);
  assert(v >= 0, "sanity");
  return v > 0;
}