/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_HEAPREGION_INLINE_HPP
#define SHARE_GC_G1_HEAPREGION_INLINE_HPP

#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"

// Bump-pointer allocation within this region: allocate between
// min_word_size and desired_word_size words, reporting the granted size
// via *actual_size.  Not MT-safe; callers must have exclusive access to
// the region.
inline HeapWord* HeapRegion::allocate_impl(size_t min_word_size,
                                           size_t desired_word_size,
                                           size_t* actual_size) {
  HeapWord* obj = top();
  size_t available = pointer_delta(end(), obj);
  size_t want_to_allocate = MIN2(available, desired_word_size);
  if (want_to_allocate >= min_word_size) {
    HeapWord* new_top = obj + want_to_allocate;
    set_top(new_top);
    assert(is_object_aligned(obj) && is_object_aligned(new_top), "checking alignment");
    *actual_size = want_to_allocate;
    return obj;
  } else {
    return NULL;
  }
}
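
// Lock-free variant of allocate_impl() for use by multiple allocating
// threads: "top" is advanced with a compare-and-exchange loop instead of
// a plain set_top().  A failed exchange means another thread allocated
// here first; we then re-read top and retry until we either succeed or
// the remaining space is too small for min_word_size.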
inline HeapWord* HeapRegion::par_allocate_impl(size_t min_word_size,
                                               size_t desired_word_size,
                                               size_t* actual_size) {
  do {
    HeapWord* obj = top();
    size_t available = pointer_delta(end(), obj);
    size_t want_to_allocate = MIN2(available, desired_word_size);
    if (want_to_allocate >= min_word_size) {
      HeapWord* new_top = obj + want_to_allocate;
      HeapWord* result = Atomic::cmpxchg(new_top, &_top, obj);
      // The result is one of two values:
      //  the old top value: the exchange succeeded and obj is ours
      //  otherwise: another thread updated top concurrently; loop
      //  around and retry with the freshly read top.
      if (result == obj) {
        assert(is_object_aligned(obj) && is_object_aligned(new_top), "checking alignment");
        *actual_size = want_to_allocate;
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

inline HeapWord* HeapRegion::allocate(size_t min_word_size,
                                      size_t desired_word_size,
                                      size_t* actual_size) {
  HeapWord* res = allocate_impl(min_word_size, desired_word_size, actual_size);
  if (res != NULL) {
    // Record the new block in the block offset table (BOT) so that
    // block_start() can later locate object starts in this region.
    _bot_part.alloc_block(res, *actual_size);
  }
  return res;
}

inline HeapWord* HeapRegion::allocate(size_t word_size) {
  size_t temp;
  return allocate(word_size, word_size, &temp);
}

inline HeapWord* HeapRegion::par_allocate(size_t word_size) {
  size_t temp;
  return par_allocate(word_size, word_size, &temp);
}

// Because the block offset table ("_bot_part") must be kept up to date
// with allocations, we serialize them with a lock.  Therefore this is
// best used for larger LAB allocations only.
inline HeapWord* HeapRegion::par_allocate(size_t min_word_size,
                                          size_t desired_word_size,
                                          size_t* actual_size) {
  MutexLocker x(&_par_alloc_lock);
  return allocate(min_word_size, desired_word_size, actual_size);
}

inline HeapWord* HeapRegion::block_start(const void* p) {
  return _bot_part.block_start(p);
}

inline HeapWord* HeapRegion::block_start_const(const void* p) const {
  return _bot_part.block_start_const(p);
}

inline bool HeapRegion::is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const {
  HeapWord* addr = (HeapWord*) obj;

  assert(addr < top(), "must be");
  assert(!is_closed_archive(),
         "Closed archive regions should not have references into other regions");
  assert(!is_humongous(), "Humongous objects not handled here");
  bool obj_is_dead = is_obj_dead(obj, prev_bitmap);

  if (ClassUnloadingWithConcurrentMark && obj_is_dead) {
    assert(!block_is_obj(addr), "must be");
    *size = block_size_using_bitmap(addr, prev_bitmap);
  } else {
    assert(block_is_obj(addr), "must be");
    *size = obj->size();
  }
  return obj_is_dead;
}

inline bool HeapRegion::block_is_obj(const HeapWord* p) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  if (!this->is_in(p)) {
    assert(is_continues_humongous(), "This case can only happen for humongous regions");
    return (p == humongous_start_region()->bottom());
  }
  if (ClassUnloadingWithConcurrentMark) {
    return !g1h->is_obj_dead(oop(p), this);
  }
  return p < top();
}
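
// With concurrent class unloading, a dead object's class may itself be
// dead, so obj->size() cannot be used.  Instead, the extent of the dead
// block is derived from the previous marking bitmap: everything from
// addr up to the next marked (live) object is one unparsable dead block.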
" 152 "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") " 153 "addr: " PTR_FORMAT, 154 p2i(bottom()), p2i(top()), p2i(end()), p2i(addr)); 155 156 // Old regions' dead objects may have dead classes 157 // We need to find the next live object using the bitmap 158 HeapWord* next = prev_bitmap->get_next_marked_addr(addr, prev_top_at_mark_start()); 159 160 assert(next > addr, "must get the next live object"); 161 return pointer_delta(next, addr); 162 } 163 164 inline bool HeapRegion::is_obj_dead(const oop obj, const G1CMBitMap* const prev_bitmap) const { 165 assert(is_in_reserved(obj), "Object " PTR_FORMAT " must be in region", p2i(obj)); 166 return !obj_allocated_since_prev_marking(obj) && 167 !prev_bitmap->is_marked((HeapWord*)obj) && 168 !is_open_archive(); 169 } 170 171 inline size_t HeapRegion::block_size(const HeapWord *addr) const { 172 if (addr == top()) { 173 return pointer_delta(end(), addr); 174 } 175 176 if (block_is_obj(addr)) { 177 return oop(addr)->size(); 178 } 179 180 return block_size_using_bitmap(addr, G1CollectedHeap::heap()->concurrent_mark()->prev_mark_bitmap()); 181 } 182 183 inline void HeapRegion::complete_compaction() { 184 // Reset space and bot after compaction is complete if needed. 185 reset_after_compaction(); 186 if (is_empty()) { 187 reset_bot(); 188 } 189 190 // After a compaction the mark bitmap is invalid, so we must 191 // treat all objects as being inside the unmarked area. 192 zero_marked_bytes(); 193 init_top_at_mark_start(); 194 195 // Clear unused heap memory in debug builds. 196 if (ZapUnusedHeapArea) { 197 mangle_unused_area(); 198 } 199 } 200 201 template<typename ApplyToMarkedClosure> 202 inline void HeapRegion::apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure) { 203 HeapWord* limit = top(); 204 HeapWord* next_addr = bottom(); 205 206 while (next_addr < limit) { 207 Prefetch::write(next_addr, PrefetchScanIntervalInBytes); 208 // This explicit is_marked check is a way to avoid 209 // some extra work done by get_next_marked_addr for 210 // the case where next_addr is marked. 
template<typename ApplyToMarkedClosure>
inline void HeapRegion::apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure) {
  HeapWord* limit = top();
  HeapWord* next_addr = bottom();

  while (next_addr < limit) {
    Prefetch::write(next_addr, PrefetchScanIntervalInBytes);
    // This explicit is_marked check is a way to avoid
    // some extra work done by get_next_marked_addr for
    // the case where next_addr is marked.
    if (bitmap->is_marked(next_addr)) {
      oop current = oop(next_addr);
      next_addr += closure->apply(current);
    } else {
      next_addr = bitmap->get_next_marked_addr(next_addr, limit);
    }
  }

  assert(next_addr == limit, "Should stop the scan at the limit.");
}

inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t min_word_size,
                                                         size_t desired_word_size,
                                                         size_t* actual_word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return par_allocate_impl(min_word_size, desired_word_size, actual_word_size);
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
  size_t temp;
  return allocate_no_bot_updates(word_size, word_size, &temp);
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t min_word_size,
                                                     size_t desired_word_size,
                                                     size_t* actual_word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return allocate_impl(min_word_size, desired_word_size, actual_word_size);
}

// Start of a marking cycle: record the current top as the "next"
// top-at-mark-start (TAMS); objects allocated above it during marking
// are treated as implicitly live.
inline void HeapRegion::note_start_of_marking() {
  _next_marked_bytes = 0;
  _next_top_at_mark_start = top();
}

// End of a marking cycle: the "next" marking data becomes the "previous"
// marking data, and the "next" fields are reset for the following cycle.
inline void HeapRegion::note_end_of_marking() {
  _prev_top_at_mark_start = _next_top_at_mark_start;
  _next_top_at_mark_start = bottom();
  _prev_marked_bytes = _next_marked_bytes;
  _next_marked_bytes = 0;
}

inline bool HeapRegion::in_collection_set() const {
  return G1CollectedHeap::heap()->is_in_cset(this);
}
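
// Apply the closure to the oops of the single humongous object that
// intersects mr.  This is the "careful" path: when called concurrently
// (is_gc_active is false) the object may not yet be fully published, in
// which case NULL is returned and the caller is expected to handle the
// failed card scan.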
template <class Closure, bool is_gc_active>
HeapWord* HeapRegion::do_oops_on_memregion_in_humongous(MemRegion mr,
                                                        Closure* cl,
                                                        G1CollectedHeap* g1h) {
  assert(is_humongous(), "precondition");
  HeapRegion* sr = humongous_start_region();
  oop obj = oop(sr->bottom());

  // If concurrent and klass_or_null is NULL, then space has been
  // allocated but the object has not yet been published by setting
  // the klass.  That can only happen if the card is stale.  However,
  // we've already set the card clean, so we must return failure,
  // since the allocating thread could have performed a write to the
  // card that might be missed otherwise.
  if (!is_gc_active && (obj->klass_or_null_acquire() == NULL)) {
    return NULL;
  }

  // We have a well-formed humongous object at the start of sr.
  // Only filler objects follow a humongous object in the containing
  // regions, and we can ignore those.  So only process the one
  // humongous object.
  if (g1h->is_obj_dead(obj, sr)) {
    // The object is dead.  There can be no other object in this region, so return
    // the end of that region.
    return end();
  }
  if (obj->is_objArray() || (sr->bottom() < mr.start())) {
    // objArrays are always marked precisely, so limit processing
    // with mr.  Non-objArrays might be precisely marked, and since
    // it's humongous it's worthwhile avoiding full processing.
    // However, the card could be stale and only cover filler
    // objects.  That should be rare, so not worth checking for;
    // instead let it fall out from the bounded iteration.
    obj->oop_iterate(cl, mr);
    return mr.end();
  } else {
    // If obj is not an objArray and mr contains the start of the
    // obj, then this could be an imprecise mark, and we need to
    // process the entire object.
    size_t size = obj->oop_iterate_size(cl);
    // We have scanned to the end of the object, but since there can be no objects
    // after this humongous object in the region, we can return the end of the
    // region if it is greater.
    return MAX2((HeapWord*)obj + size, mr.end());
  }
}

template <bool is_gc_active, class Closure>
HeapWord* HeapRegion::oops_on_memregion_seq_iterate_careful(MemRegion mr,
                                                            Closure* cl) {
  assert(MemRegion(bottom(), end()).contains(mr), "Card region not in heap region");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Special handling for humongous regions.
  if (is_humongous()) {
    return do_oops_on_memregion_in_humongous<Closure, is_gc_active>(mr, cl, g1h);
  }
  assert(is_old() || is_archive(), "Wrongly trying to iterate over region %u type %s", _hrm_index, get_type_str());

  // Because mr has been trimmed to what's been allocated in this
  // region, the parts of the heap that are examined here are always
  // parsable; there's no need to use klass_or_null to detect
  // in-progress allocation.

  // Cache the boundaries of the memory region in some const locals.
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // Find the obj that extends onto mr.start().
  // Update BOT as needed while finding start of (possibly dead)
  // object containing the start of the region.
  HeapWord* cur = block_start(start);

#ifdef ASSERT
  {
    assert(cur <= start,
           "cur: " PTR_FORMAT ", start: " PTR_FORMAT, p2i(cur), p2i(start));
    HeapWord* next = cur + block_size(cur);
    assert(start < next,
           "start: " PTR_FORMAT ", next: " PTR_FORMAT, p2i(start), p2i(next));
  }
#endif

  const G1CMBitMap* const bitmap = g1h->concurrent_mark()->prev_mark_bitmap();
  while (true) {
    oop obj = oop(cur);
    assert(oopDesc::is_oop(obj, true), "Not an oop at " PTR_FORMAT, p2i(cur));
    assert(obj->klass_or_null() != NULL,
           "Unparsable heap at " PTR_FORMAT, p2i(cur));

    size_t size;
    bool is_dead = is_obj_dead_with_size(obj, bitmap, &size);
    bool is_precise = false;

    cur += size;
    if (!is_dead) {
      // Process live object's references.

      // Non-objArrays are usually marked imprecisely at the object
      // start, in which case we need to iterate over them in full.
      // objArrays are precisely marked, but can still be iterated
      // over in full if completely covered by mr.
      if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
        obj->oop_iterate(cl);
      } else {
        obj->oop_iterate(cl, mr);
        is_precise = true;
      }
    }
    if (cur >= end) {
      return is_precise ? end : cur;
    }
  }
}

#endif // SHARE_GC_G1_HEAPREGION_INLINE_HPP