/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP
#define SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP

#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/space.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "utilities/align.hpp"

inline HeapWord* G1ContiguousSpace::allocate_impl(size_t min_word_size,
                                                  size_t desired_word_size,
                                                  size_t* actual_size) {
  HeapWord* obj = top();
  size_t available = pointer_delta(end(), obj);
  size_t want_to_allocate = MIN2(available, desired_word_size);
  if (want_to_allocate >= min_word_size) {
    HeapWord* new_top = obj + want_to_allocate;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    *actual_size = want_to_allocate;
    return obj;
  } else {
    return NULL;
  }
}
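
// Lock-free variant of allocate_impl(): speculatively computes the new top and
// installs it with a CAS, retrying until either the CAS succeeds or the space
// remaining in the region drops below min_word_size.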
inline HeapWord* G1ContiguousSpace::par_allocate_impl(size_t min_word_size,
                                                       size_t desired_word_size,
                                                       size_t* actual_size) {
  do {
    HeapWord* obj = top();
    size_t available = pointer_delta(end(), obj);
    size_t want_to_allocate = MIN2(available, desired_word_size);
    if (want_to_allocate >= min_word_size) {
      HeapWord* new_top = obj + want_to_allocate;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // The result is one of two values:
      //  the old top value: the exchange succeeded and obj is our allocation
      //  otherwise: the current value of top, so loop and retry the CAS.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        *actual_size = want_to_allocate;
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

inline HeapWord* G1ContiguousSpace::allocate(size_t min_word_size,
                                             size_t desired_word_size,
                                             size_t* actual_size) {
  HeapWord* res = allocate_impl(min_word_size, desired_word_size, actual_size);
  if (res != NULL) {
    _bot_part.alloc_block(res, *actual_size);
  }
  return res;
}

inline HeapWord* G1ContiguousSpace::allocate(size_t word_size) {
  size_t temp;
  return allocate(word_size, word_size, &temp);
}

inline HeapWord* G1ContiguousSpace::par_allocate(size_t word_size) {
  size_t temp;
  return par_allocate(word_size, word_size, &temp);
}

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock. Therefore, this is best
// used for larger LAB allocations only.
inline HeapWord* G1ContiguousSpace::par_allocate(size_t min_word_size,
                                                 size_t desired_word_size,
                                                 size_t* actual_size) {
  MutexLocker x(&_par_alloc_lock);
  return allocate(min_word_size, desired_word_size, actual_size);
}

inline HeapWord* G1ContiguousSpace::block_start(const void* p) {
  return _bot_part.block_start(p);
}

inline HeapWord*
G1ContiguousSpace::block_start_const(const void* p) const {
  return _bot_part.block_start_const(p);
}
inline bool HeapRegion::is_obj_dead_with_size(const oop obj, G1CMBitMapRO* prev_bitmap, size_t* size) const {
  HeapWord* addr = (HeapWord*) obj;

  assert(addr < top(), "must be");
  assert(!is_closed_archive(),
         "Archive regions should not have references into interesting regions.");
  assert(!is_humongous(), "Humongous objects not handled here");
  bool obj_is_dead = is_obj_dead(obj, prev_bitmap);

  if (ClassUnloadingWithConcurrentMark && obj_is_dead) {
    assert(!block_is_obj(addr) || is_open_archive(), "must be");
    *size = block_size_using_bitmap(addr, prev_bitmap);
  } else {
    assert(block_is_obj(addr), "must be");
    *size = obj->size();
  }
  return obj_is_dead;
}

inline bool
HeapRegion::block_is_obj(const HeapWord* p) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  if (!this->is_in(p)) {
    assert(is_continues_humongous(), "This case can only happen for humongous regions");
    return (p == humongous_start_region()->bottom());
  }
  if (ClassUnloadingWithConcurrentMark) {
    return !g1h->is_obj_dead(oop(p), this);
  }
  return p < top();
}

inline size_t HeapRegion::block_size_using_bitmap(const HeapWord* addr, const G1CMBitMapRO* prev_bitmap) const {
  assert(ClassUnloadingWithConcurrentMark,
         "All blocks should be objects if class unloading isn't used, so this method should not be called. "
         "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") "
         "addr: " PTR_FORMAT,
         p2i(bottom()), p2i(top()), p2i(end()), p2i(addr));

  // Old regions' dead objects may have dead classes.
  // We need to find the next live object using the bitmap.
  HeapWord* next = prev_bitmap->getNextMarkedWordAddress(addr, prev_top_at_mark_start());

  assert(next > addr, "must get the next live object");
  return pointer_delta(next, addr);
}

inline bool HeapRegion::is_obj_dead(const oop obj, const G1CMBitMapRO* prev_bitmap) const {
  assert(is_in_reserved(obj), "Object " PTR_FORMAT " must be in region", p2i(obj));
  return !obj_allocated_since_prev_marking(obj) && !prev_bitmap->isMarked((HeapWord*)obj);
}

inline size_t HeapRegion::block_size(const HeapWord *addr) const {
  if (addr == top()) {
    return pointer_delta(end(), addr);
  }

  if (block_is_obj(addr)) {
    return oop(addr)->size();
  }

  return block_size_using_bitmap(addr, G1CollectedHeap::heap()->concurrent_mark()->prevMarkBitMap());
}

inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t min_word_size,
                                                         size_t desired_word_size,
                                                         size_t* actual_word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return par_allocate_impl(min_word_size, desired_word_size, actual_word_size);
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
  size_t temp;
  return allocate_no_bot_updates(word_size, word_size, &temp);
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t min_word_size,
                                                     size_t desired_word_size,
                                                     size_t* actual_word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return allocate_impl(min_word_size, desired_word_size, actual_word_size);
}

inline void HeapRegion::note_start_of_marking() {
  _next_marked_bytes = 0;
  _next_top_at_mark_start = top();
}

inline void HeapRegion::note_end_of_marking() {
  _prev_top_at_mark_start = _next_top_at_mark_start;
  _prev_marked_bytes = _next_marked_bytes;
  _next_marked_bytes = 0;
}

inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
    assert(_next_top_at_mark_start == bottom(), "invariant");
  } else {
    if (during_initial_mark) {
      // During initial-mark we'll explicitly mark any objects on old
      // regions that are pointed to by roots. Given that explicit
      // marks only make sense under NTAMS it'd be nice if we could
      // check that condition if we wanted to. Given that we don't
      // know where the top of this region will end up, we simply set
      // NTAMS to the end of the region so all marks will be below
      // NTAMS. We'll set it to the actual top when we retire this region.
      _next_top_at_mark_start = end();
    } else {
      // We could have re-used this old region as to-space over a
      // couple of GCs since the start of the concurrent marking
      // cycle. This means that [bottom,NTAMS) will contain objects
      // copied up to and including initial-mark and [NTAMS, top)
      // will contain objects copied during the concurrent marking cycle.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

inline void HeapRegion::note_end_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
    assert(_next_top_at_mark_start == bottom(), "invariant");
  } else {
    if (during_initial_mark) {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(_next_top_at_mark_start == end(), "pre-condition");
      _next_top_at_mark_start = top();
    } else {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

inline bool HeapRegion::in_collection_set() const {
  return G1CollectedHeap::heap()->is_in_cset(this);
}

template <class Closure, bool is_gc_active>
bool HeapRegion::do_oops_on_card_in_humongous(MemRegion mr,
                                              Closure* cl,
                                              G1CollectedHeap* g1h) {
  assert(is_humongous(), "precondition");
  HeapRegion* sr = humongous_start_region();
  oop obj = oop(sr->bottom());

  // If concurrent and klass_or_null is NULL, then space has been
  // allocated but the object has not yet been published by setting
  // the klass. That can only happen if the card is stale. However,
  // we've already set the card clean, so we must return failure,
  // since the allocating thread could have performed a write to the
  // card that might be missed otherwise.
  if (!is_gc_active && (obj->klass_or_null_acquire() == NULL)) {
    return false;
  }

  // We have a well-formed humongous object at the start of sr.
  // Only filler objects follow a humongous object in the containing
  // regions, and we can ignore those. So only process the one
  // humongous object.
  if (!g1h->is_obj_dead(obj, sr)) {
    if (obj->is_objArray() || (sr->bottom() < mr.start())) {
      // objArrays are always marked precisely, so limit processing
      // with mr. Non-objArrays might be precisely marked, and since
      // it's humongous it's worthwhile avoiding full processing.
      // However, the card could be stale and only cover filler
      // objects. That should be rare, so not worth checking for;
      // instead let it fall out from the bounded iteration.
      obj->oop_iterate(cl, mr);
    } else {
      // If obj is not an objArray and mr contains the start of the
      // obj, then this could be an imprecise mark, and we need to
      // process the entire object.
      obj->oop_iterate(cl);
    }
  }
  return true;
}

template <bool is_gc_active, class Closure>
bool HeapRegion::oops_on_card_seq_iterate_careful(MemRegion mr,
                                                  Closure* cl) {
  assert(MemRegion(bottom(), end()).contains(mr), "Card region not in heap region");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Special handling for humongous regions.
  if (is_humongous()) {
    return do_oops_on_card_in_humongous<Closure, is_gc_active>(mr, cl, g1h);
  }
  assert(is_old(), "precondition");

  // Because mr has been trimmed to what's been allocated in this
  // region, the parts of the heap that are examined here are always
  // parsable; there's no need to use klass_or_null to detect
  // in-progress allocation.

  // Cache the boundaries of the memory region in some const locals.
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // Find the obj that extends onto mr.start().
  // Update BOT as needed while finding start of (possibly dead)
  // object containing the start of the region.
  HeapWord* cur = block_start(start);

#ifdef ASSERT
  {
    assert(cur <= start,
           "cur: " PTR_FORMAT ", start: " PTR_FORMAT, p2i(cur), p2i(start));
    HeapWord* next = cur + block_size(cur);
    assert(start < next,
           "start: " PTR_FORMAT ", next: " PTR_FORMAT, p2i(start), p2i(next));
  }
#endif
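
  // Walk the blocks overlapping mr, starting at the block that contains
  // mr.start(). is_obj_dead_with_size() supplies each block's size safely,
  // even for dead objects whose classes may already have been unloaded;
  // live objects get the closure applied, bounded by mr or in full as
  // decided below.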
  G1CMBitMapRO* bitmap = g1h->concurrent_mark()->prevMarkBitMap();
  do {
    oop obj = oop(cur);
    assert(obj->is_oop(true), "Not an oop at " PTR_FORMAT, p2i(cur));
    assert(obj->klass_or_null() != NULL,
           "Unparsable heap at " PTR_FORMAT, p2i(cur));

    size_t size;
    bool is_dead = is_obj_dead_with_size(obj, bitmap, &size);

    cur += size;
    if (!is_dead) {
      // Process live object's references.

      // Non-objArrays are usually marked imprecise at the object
      // start, in which case we need to iterate over them in full.
      // objArrays are precisely marked, but can still be iterated
      // over in full if completely covered.
      if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
        obj->oop_iterate(cl);
      } else {
        obj->oop_iterate(cl, mr);
      }
    }
  } while (cur < end);

  return true;
}

#endif // SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP