/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP
#define SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP

#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/space.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
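// The allocators below follow a (min_word_size, desired_word_size,
// actual_size) protocol: the caller asks for desired_word_size words but
// accepts anything down to min_word_size, so that e.g. a LAB refill can
// consume whatever fragment remains at the top of the region. On success,
// *actual_size reports the number of words actually allocated.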
inline HeapWord* G1OffsetTableContigSpace::allocate_impl(size_t min_word_size,
                                                         size_t desired_word_size,
                                                         size_t* actual_size) {
  HeapWord* obj = top();
  size_t available = pointer_delta(end(), obj);
  size_t want_to_allocate = MIN2(available, desired_word_size);
  if (want_to_allocate >= min_word_size) {
    HeapWord* new_top = obj + want_to_allocate;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    *actual_size = want_to_allocate;
    return obj;
  } else {
    return NULL;
  }
}

inline HeapWord* G1OffsetTableContigSpace::par_allocate_impl(size_t min_word_size,
                                                             size_t desired_word_size,
                                                             size_t* actual_size) {
  do {
    HeapWord* obj = top();
    size_t available = pointer_delta(end(), obj);
    size_t want_to_allocate = MIN2(available, desired_word_size);
    if (want_to_allocate >= min_word_size) {
      HeapWord* new_top = obj + want_to_allocate;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // If the exchange succeeded, result is the old top value, i.e. obj;
      // otherwise result is the current value of top and we retry the loop.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        *actual_size = want_to_allocate;
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

inline HeapWord* G1OffsetTableContigSpace::allocate(size_t min_word_size,
                                                    size_t desired_word_size,
                                                    size_t* actual_size) {
  HeapWord* res = allocate_impl(min_word_size, desired_word_size, actual_size);
  if (res != NULL) {
    _offsets.alloc_block(res, *actual_size);
  }
  return res;
}

inline HeapWord* G1OffsetTableContigSpace::allocate(size_t word_size) {
  size_t temp;
  return allocate(word_size, word_size, &temp);
}

inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t word_size) {
  size_t temp;
  return par_allocate(word_size, word_size, &temp);
}

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock. It is therefore best
// if this is used for larger LAB allocations only.
inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t min_word_size,
                                                        size_t desired_word_size,
                                                        size_t* actual_size) {
  MutexLocker x(&_par_alloc_lock);
  return allocate(min_word_size, desired_word_size, actual_size);
}

inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
  return _offsets.block_start(p);
}

inline HeapWord*
G1OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start_const(p);
}

inline bool
HeapRegion::block_is_obj(const HeapWord* p) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  if (!this->is_in(p)) {
    HeapRegion* hr = g1h->heap_region_containing(p);
#ifdef ASSERT
    assert(hr->is_humongous(), "This case can only happen for humongous regions");
    oop obj = oop(hr->humongous_start_region()->bottom());
    assert((HeapWord*)obj <= p, "p must be in humongous object");
    assert(p <= (HeapWord*)obj + obj->size(), "p must be in humongous object");
#endif
    return hr->block_is_obj(p);
  }
  if (ClassUnloadingWithConcurrentMark) {
    return !g1h->is_obj_dead(oop(p), this);
  }
  return p < top();
}

inline size_t
HeapRegion::block_size(const HeapWord *addr) const {
  if (addr == top()) {
    return pointer_delta(end(), addr);
  }

  if (block_is_obj(addr)) {
    return oop(addr)->size();
  }

  assert(ClassUnloadingWithConcurrentMark,
         "All blocks should be objects if G1 Class Unloading isn't used. "
         "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") "
         "addr: " PTR_FORMAT,
         p2i(bottom()), p2i(top()), p2i(end()), p2i(addr));

  // Old regions' dead objects may have dead classes, so we cannot rely
  // on the oop to determine the block size. We need to find the next
  // live (marked) object instead.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
    getNextMarkedWordAddress(addr, prev_top_at_mark_start());

  assert(next > addr, "must get the next live object");
  return pointer_delta(next, addr);
}

inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t min_word_size,
                                                         size_t desired_word_size,
                                                         size_t* actual_word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return par_allocate_impl(min_word_size, desired_word_size, actual_word_size);
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
  size_t temp;
  return allocate_no_bot_updates(word_size, word_size, &temp);
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t min_word_size,
                                                     size_t desired_word_size,
                                                     size_t* actual_word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return allocate_impl(min_word_size, desired_word_size, actual_word_size);
}

inline void HeapRegion::note_start_of_marking() {
  _next_marked_bytes = 0;
  _next_top_at_mark_start = top();
}

inline void HeapRegion::note_end_of_marking() {
  _prev_top_at_mark_start = _next_top_at_mark_start;
  _prev_marked_bytes = _next_marked_bytes;
  _next_marked_bytes = 0;
}
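// The _prev/_next top-at-mark-start pointers (PTAMS/NTAMS) record where
// top() was when the previous / current marking cycle started. Relative
// to a given marking, objects above its TAMS were allocated after the
// mark started and are treated as implicitly live; liveness below TAMS
// comes from the corresponding mark bitmap. The note_* methods above and
// below keep these pointers consistent across marking and evacuation.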
inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
    assert(_next_top_at_mark_start == bottom(), "invariant");
  } else {
    if (during_initial_mark) {
      // During initial-mark we'll explicitly mark any objects on old
      // regions that are pointed to by roots. Given that explicit
      // marks only make sense under NTAMS it'd be nice if we could
      // check that condition if we wanted to. Given that we don't
      // know where the top of this region will end up, we simply set
      // NTAMS to the end of the region so all marks will be below
      // NTAMS. We'll set it to the actual top when we retire this region.
      _next_top_at_mark_start = end();
    } else {
      // We could have re-used this old region as to-space over a
      // couple of GCs since the start of the concurrent marking
      // cycle. This means that [bottom, NTAMS) will contain objects
      // copied up to and including initial-mark and [NTAMS, top)
      // will contain objects copied during the concurrent marking cycle.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

inline void HeapRegion::note_end_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
    assert(_next_top_at_mark_start == bottom(), "invariant");
  } else {
    if (during_initial_mark) {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(_next_top_at_mark_start == end(), "pre-condition");
      _next_top_at_mark_start = top();
    } else {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

inline bool HeapRegion::in_collection_set() const {
  return G1CollectedHeap::heap()->is_in_cset(this);
}

inline HeapRegion* HeapRegion::next_in_collection_set() const {
  assert(in_collection_set(), "should only invoke on member of CS.");
  assert(_next_in_special_set == NULL ||
         _next_in_special_set->in_collection_set(),
         "Malformed CS.");
  return _next_in_special_set;
}

inline void HeapRegion::set_next_in_collection_set(HeapRegion* r) {
  assert(in_collection_set(), "should only invoke on member of CS.");
  assert(r == NULL || r->in_collection_set(), "Malformed CS.");
  _next_in_special_set = r;
}

#endif // SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP