 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_HEAPREGION_INLINE_HPP
#define SHARE_GC_G1_HEAPREGION_INLINE_HPP

#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/space.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/align.hpp"

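// Non-concurrent bump-pointer allocation: allocate between min_word_size and
// desired_word_size words at the current top and report the size actually
// allocated through actual_size. Returns NULL if even min_word_size does not fit.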
inline HeapWord* G1ContiguousSpace::allocate_impl(size_t min_word_size,
                                                  size_t desired_word_size,
                                                  size_t* actual_size) {
  HeapWord* obj = top();
  size_t available = pointer_delta(end(), obj);
  size_t want_to_allocate = MIN2(available, desired_word_size);
  if (want_to_allocate >= min_word_size) {
    HeapWord* new_top = obj + want_to_allocate;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    *actual_size = want_to_allocate;
    return obj;
  } else {
    return NULL;
  }
}

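// Lock-free variant of allocate_impl(): claims the memory by advancing top
// with a CAS and retries until the allocation either succeeds or the
// remaining space drops below min_word_size.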
inline HeapWord* G1ContiguousSpace::par_allocate_impl(size_t min_word_size,
                                                      size_t desired_word_size,
                                                      size_t* actual_size) {
  do {
    HeapWord* obj = top();
    size_t available = pointer_delta(end(), obj);
    size_t want_to_allocate = MIN2(available, desired_word_size);
    if (want_to_allocate >= min_word_size) {
      HeapWord* new_top = obj + want_to_allocate;
      HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
      // result is one of two values:
      //  the old top value (obj): the exchange succeeded and we own the memory;
      //  otherwise: another thread changed top first, so retry with the new value.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        *actual_size = want_to_allocate;
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

inline HeapWord* G1ContiguousSpace::allocate(size_t min_word_size,
                                             size_t desired_word_size,
                                             size_t* actual_size) {
  HeapWord* res = allocate_impl(min_word_size, desired_word_size, actual_size);
  if (res != NULL) {
    _bot_part.alloc_block(res, *actual_size);
  }
  return res;
}

inline HeapWord* G1ContiguousSpace::allocate(size_t word_size) {
  size_t temp;
  return allocate(word_size, word_size, &temp);
}

inline HeapWord* G1ContiguousSpace::par_allocate(size_t word_size) {
  size_t temp;
  return par_allocate(word_size, word_size, &temp);
}

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock. Therefore, best if
// this is used for larger LAB allocations only.
inline HeapWord* G1ContiguousSpace::par_allocate(size_t min_word_size,
                                                 size_t desired_word_size,
                                                 size_t* actual_size) {
  MutexLocker x(&_par_alloc_lock);
  return allocate(min_word_size, desired_word_size, actual_size);
}

inline HeapWord* G1ContiguousSpace::block_start(const void* p) {
  return _bot_part.block_start(p);
}

inline HeapWord*
G1ContiguousSpace::block_start_const(const void* p) const {
  return _bot_part.block_start_const(p);
}

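// Determines liveness of obj against the given prev bitmap and, as a side
// effect, reports the size of the block starting at obj through *size:
// the object size for live objects, otherwise the size of the dead block
// as derived from the bitmap.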
inline bool HeapRegion::is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const {
  HeapWord* addr = (HeapWord*) obj;

  assert(addr < top(), "must be");
  assert(!is_closed_archive(),
         "Closed archive regions should not have references into other regions");
  assert(!is_humongous(), "Humongous objects not handled here");
  bool obj_is_dead = is_obj_dead(obj, prev_bitmap);

  if (ClassUnloadingWithConcurrentMark && obj_is_dead) {
    assert(!block_is_obj(addr), "must be");
    *size = block_size_using_bitmap(addr, prev_bitmap);
  } else {
    assert(block_is_obj(addr), "must be");
    *size = obj->size();
  }
  return obj_is_dead;
}

inline bool
HeapRegion::block_is_obj(const HeapWord* p) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  if (!this->is_in(p)) {
    assert(is_continues_humongous(), "This case can only happen for humongous regions");
    return (p == humongous_start_region()->bottom());
  }
  if (ClassUnloadingWithConcurrentMark) {
    return !g1h->is_obj_dead(oop(p), this);
  }
  return p < top();
}

inline size_t HeapRegion::block_size_using_bitmap(const HeapWord* addr, const G1CMBitMap* const prev_bitmap) const {
  assert(ClassUnloadingWithConcurrentMark,
         "All blocks should be objects if class unloading isn't used, so this method should not be called. "
         "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") "
         "addr: " PTR_FORMAT,
         p2i(bottom()), p2i(top()), p2i(end()), p2i(addr));

  // Old regions' dead objects may have dead classes
  // We need to find the next live object using the bitmap
  HeapWord* next = prev_bitmap->get_next_marked_addr(addr, prev_top_at_mark_start());

  assert(next > addr, "must get the next live object");
  return pointer_delta(next, addr);
}

inline bool HeapRegion::is_obj_dead(const oop obj, const G1CMBitMap* const prev_bitmap) const {
  assert(is_in_reserved(obj), "Object " PTR_FORMAT " must be in region", p2i(obj));
  return !obj_allocated_since_prev_marking(obj) &&
         !prev_bitmap->is_marked((HeapWord*)obj) &&
         !is_open_archive();
}

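// Returns the size in words of the block starting at addr: the remaining
// space in the region if addr == top(), the object's size if a live object
// starts there, and otherwise the distance to the next marked object on the
// prev bitmap.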
inline size_t HeapRegion::block_size(const HeapWord *addr) const {
  if (addr == top()) {
    return pointer_delta(end(), addr);
  }

  if (block_is_obj(addr)) {
    return oop(addr)->size();
  }

  return block_size_using_bitmap(addr, G1CollectedHeap::heap()->concurrent_mark()->prev_mark_bitmap());
}

inline void HeapRegion::complete_compaction() {
  // Reset space and bot after compaction is complete if needed.
  reset_after_compaction();
  if (used_region().is_empty()) {
    reset_bot();
  }

  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();

  // Clear unused heap memory in debug builds.
  if (ZapUnusedHeapArea) {
    mangle_unused_area();
  }
}

template<typename ApplyToMarkedClosure>
inline void HeapRegion::apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure) {
  HeapWord* limit = scan_limit();
  HeapWord* next_addr = bottom();

  while (next_addr < limit) {
    Prefetch::write(next_addr, PrefetchScanIntervalInBytes);
    // This explicit is_marked check is a way to avoid
    // some extra work done by get_next_marked_addr for
    // the case where next_addr is marked.
    if (bitmap->is_marked(next_addr)) {
      oop current = oop(next_addr);
      next_addr += closure->apply(current);
    } else {
      next_addr = bitmap->get_next_marked_addr(next_addr, limit);
    }
  }

  assert(next_addr == limit, "Should stop the scan at the limit.");
}

inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t min_word_size,
                                                          size_t desired_word_size,
#ifndef SHARE_GC_G1_HEAPREGION_INLINE_HPP
#define SHARE_GC_G1_HEAPREGION_INLINE_HPP

#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"

inline HeapWord* HeapRegion::allocate_impl(size_t min_word_size,
                                           size_t desired_word_size,
                                           size_t* actual_size) {
  HeapWord* obj = top();
  size_t available = pointer_delta(end(), obj);
  size_t want_to_allocate = MIN2(available, desired_word_size);
  if (want_to_allocate >= min_word_size) {
    HeapWord* new_top = obj + want_to_allocate;
    set_top(new_top);
    assert(is_object_aligned(obj) && is_object_aligned(new_top), "checking alignment");
    *actual_size = want_to_allocate;
    return obj;
  } else {
    return NULL;
  }
}

inline HeapWord* HeapRegion::par_allocate_impl(size_t min_word_size,
                                               size_t desired_word_size,
                                               size_t* actual_size) {
  do {
    HeapWord* obj = top();
    size_t available = pointer_delta(end(), obj);
    size_t want_to_allocate = MIN2(available, desired_word_size);
    if (want_to_allocate >= min_word_size) {
      HeapWord* new_top = obj + want_to_allocate;
      HeapWord* result = Atomic::cmpxchg(new_top, &_top, obj);
      // result is one of two values:
      //  the old top value (obj): the exchange succeeded and we own the memory;
      //  otherwise: another thread changed top first, so retry with the new value.
      if (result == obj) {
        assert(is_object_aligned(obj) && is_object_aligned(new_top), "checking alignment");
        *actual_size = want_to_allocate;
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

inline HeapWord* HeapRegion::allocate(size_t min_word_size,
                                      size_t desired_word_size,
                                      size_t* actual_size) {
  HeapWord* res = allocate_impl(min_word_size, desired_word_size, actual_size);
  if (res != NULL) {
    _bot_part.alloc_block(res, *actual_size);
  }
  return res;
}

inline HeapWord* HeapRegion::allocate(size_t word_size) {
  size_t temp;
  return allocate(word_size, word_size, &temp);
}

inline HeapWord* HeapRegion::par_allocate(size_t word_size) {
  size_t temp;
  return par_allocate(word_size, word_size, &temp);
}

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock. Therefore, best if
// this is used for larger LAB allocations only.
inline HeapWord* HeapRegion::par_allocate(size_t min_word_size,
                                          size_t desired_word_size,
                                          size_t* actual_size) {
  MutexLocker x(&_par_alloc_lock);
  return allocate(min_word_size, desired_word_size, actual_size);
}

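// Block offset table lookups: given an arbitrary address p inside the region,
// find the start of the block (object or dead range) that contains it.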
inline HeapWord* HeapRegion::block_start(const void* p) {
  return _bot_part.block_start(p);
}

inline HeapWord* HeapRegion::block_start_const(const void* p) const {
  return _bot_part.block_start_const(p);
}

inline bool HeapRegion::is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const {
  HeapWord* addr = (HeapWord*) obj;

  assert(addr < top(), "must be");
  assert(!is_closed_archive(),
         "Closed archive regions should not have references into other regions");
  assert(!is_humongous(), "Humongous objects not handled here");
  bool obj_is_dead = is_obj_dead(obj, prev_bitmap);

  if (ClassUnloadingWithConcurrentMark && obj_is_dead) {
    assert(!block_is_obj(addr), "must be");
    *size = block_size_using_bitmap(addr, prev_bitmap);
  } else {
    assert(block_is_obj(addr), "must be");
    *size = obj->size();
  }
  return obj_is_dead;
}

inline bool HeapRegion::block_is_obj(const HeapWord* p) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  if (!this->is_in(p)) {
    assert(is_continues_humongous(), "This case can only happen for humongous regions");
    return (p == humongous_start_region()->bottom());
  }
  if (ClassUnloadingWithConcurrentMark) {
    return !g1h->is_obj_dead(oop(p), this);
  }
  return p < top();
}

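// Size of the (dead) block starting at addr when class unloading with
// concurrent mark is enabled: scan the prev bitmap for the next live object
// and return the distance to it.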
inline size_t HeapRegion::block_size_using_bitmap(const HeapWord* addr, const G1CMBitMap* const prev_bitmap) const {
  assert(ClassUnloadingWithConcurrentMark,
         "All blocks should be objects if class unloading isn't used, so this method should not be called. "
         "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") "
         "addr: " PTR_FORMAT,
         p2i(bottom()), p2i(top()), p2i(end()), p2i(addr));

  // Old regions' dead objects may have dead classes
  // We need to find the next live object using the bitmap
  HeapWord* next = prev_bitmap->get_next_marked_addr(addr, prev_top_at_mark_start());

  assert(next > addr, "must get the next live object");
  return pointer_delta(next, addr);
}

inline bool HeapRegion::is_obj_dead(const oop obj, const G1CMBitMap* const prev_bitmap) const {
  assert(is_in_reserved(obj), "Object " PTR_FORMAT " must be in region", p2i(obj));
  return !obj_allocated_since_prev_marking(obj) &&
         !prev_bitmap->is_marked((HeapWord*)obj) &&
         !is_open_archive();
}

inline size_t HeapRegion::block_size(const HeapWord *addr) const {
  if (addr == top()) {
    return pointer_delta(end(), addr);
  }

  if (block_is_obj(addr)) {
    return oop(addr)->size();
  }

  return block_size_using_bitmap(addr, G1CollectedHeap::heap()->concurrent_mark()->prev_mark_bitmap());
}

inline void HeapRegion::complete_compaction() {
  // Reset space and bot after compaction is complete if needed.
  reset_after_compaction();
  if (is_empty()) {
    reset_bot();
  }

  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();

  // Clear unused heap memory in debug builds.
  if (ZapUnusedHeapArea) {
    mangle_unused_area();
  }
}

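// Iterates over all objects in this region that are marked in the given
// bitmap and applies the closure to each of them; the closure's apply()
// returns the object's size in words, which is used to advance the scan.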
template<typename ApplyToMarkedClosure>
inline void HeapRegion::apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure) {
  HeapWord* limit = top();
  HeapWord* next_addr = bottom();

  while (next_addr < limit) {
    Prefetch::write(next_addr, PrefetchScanIntervalInBytes);
    // This explicit is_marked check is a way to avoid
    // some extra work done by get_next_marked_addr for
    // the case where next_addr is marked.
    if (bitmap->is_marked(next_addr)) {
      oop current = oop(next_addr);
      next_addr += closure->apply(current);
    } else {
      next_addr = bitmap->get_next_marked_addr(next_addr, limit);
    }
  }

  assert(next_addr == limit, "Should stop the scan at the limit.");
}

inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t min_word_size,
                                                          size_t desired_word_size,