 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP
#define SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP

#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/space.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"

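// The two allocation helpers below implement flexible-size requests: the
// caller asks for at least min_word_size words and at most desired_word_size
// words, and the number of words actually handed out is reported back
// through *actual_size. NULL is returned only if even min_word_size words
// are not available.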
// This version requires locking.
inline HeapWord* G1OffsetTableContigSpace::allocate_impl(size_t min_word_size,
                                                         size_t desired_word_size,
                                                         size_t* actual_size) {
  HeapWord* obj = top();
  size_t available = pointer_delta(end(), obj);
  size_t want_to_allocate = MIN2(available, desired_word_size);
  if (want_to_allocate >= min_word_size) {
    HeapWord* new_top = obj + want_to_allocate;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    *actual_size = want_to_allocate;
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* G1OffsetTableContigSpace::par_allocate_impl(size_t min_word_size,
                                                             size_t desired_word_size,
                                                             size_t* actual_size) {
  do {
    HeapWord* obj = top();
    size_t available = pointer_delta(end(), obj);
    size_t want_to_allocate = MIN2(available, desired_word_size);
    if (want_to_allocate >= min_word_size) {
      HeapWord* new_top = obj + want_to_allocate;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      //  the old top value: the exchange succeeded
      //  otherwise: another thread updated top first, and the loop retries
      //  against the new top value
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        *actual_size = want_to_allocate;
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}
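
// Illustrative sketch (hypothetical caller, not part of this file): a
// PLAB-style user that can live with anything between a minimum and a
// desired buffer size would call the flexible interface like this:
//
//   size_t actual = 0;
//   HeapWord* buf = space->par_allocate(min_words, desired_words, &actual);
//   if (buf != NULL) {
//     // buf holds 'actual' words, min_words <= actual <= desired_words.
//   }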

inline HeapWord* G1OffsetTableContigSpace::allocate(size_t min_word_size,
                                                    size_t desired_word_size,
                                                    size_t* actual_size) {
  HeapWord* res = allocate_impl(min_word_size, desired_word_size, actual_size);
  if (res != NULL) {
    _offsets.alloc_block(res, *actual_size);
  }
  return res;
}

inline HeapWord* G1OffsetTableContigSpace::allocate(size_t word_size) {
  size_t temp;
  return allocate(word_size, word_size, &temp);
}

inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t word_size) {
  size_t temp;
  return par_allocate(word_size, word_size, &temp);
}

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock. Therefore, best if
// this is used for larger LAB allocations only.
inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t min_word_size,
                                                        size_t desired_word_size,
                                                        size_t* actual_size) {
  MutexLocker x(&_par_alloc_lock);
  return allocate(min_word_size, desired_word_size, actual_size);
}

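// Block-start queries simply forward to the block offset table, which maps
// an arbitrary address within the space to the start of the block that
// contains it.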
inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
  return _offsets.block_start(p);
}

inline HeapWord*
G1OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start_const(p);
}

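// When class unloading with concurrent marking is enabled, a region may
// contain dead objects whose classes have already been unloaded, so a block
// counts as an object only if it is still live. Otherwise every block below
// top() is a valid object.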
inline bool
HeapRegion::block_is_obj(const HeapWord* p) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  if (ClassUnloadingWithConcurrentMark) {
    return !g1h->is_obj_dead(oop(p), this);
  }
  return p < top();
}

inline size_t
HeapRegion::block_size(const HeapWord *addr) const {
  if (addr == top()) {
    return pointer_delta(end(), addr);
  }

  if (block_is_obj(addr)) {
    return oop(addr)->size();
  }

  assert(ClassUnloadingWithConcurrentMark,
         err_msg("All blocks should be objects if G1 Class Unloading isn't used. "
                 "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") "
                 "addr: " PTR_FORMAT,
                 p2i(bottom()), p2i(top()), p2i(end()), p2i(addr)));

  // Old regions' dead objects may have dead classes. We need to find the
  // next live object by some means other than reading the oop's size.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
      getNextMarkedWordAddress(addr, prev_top_at_mark_start());

  assert(next > addr, "must get the next live object");
  return pointer_delta(next, addr);
}

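// Young regions are always evacuated in their entirety, so G1 never consults
// the block offset table to find object starts inside them; allocations in
// young regions can therefore skip the BOT update that
// G1OffsetTableContigSpace::allocate() performs.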
inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t min_word_size,
                                                         size_t desired_word_size,
                                                         size_t* actual_word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return par_allocate_impl(min_word_size, desired_word_size, actual_word_size);
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
  size_t temp;
  return allocate_no_bot_updates(word_size, word_size, &temp);
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t min_word_size,
                                                     size_t desired_word_size,
                                                     size_t* actual_word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return allocate_impl(min_word_size, desired_word_size, actual_word_size);
}

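// Marking bookkeeping: at the start of a cycle the current top is recorded
// as next-top-at-mark-start (NTAMS), and objects allocated at or above NTAMS
// while marking runs are treated as implicitly live. At the end of the cycle
// the "next" values become the "prev" values associated with the now-complete
// previous marking.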
inline void HeapRegion::note_start_of_marking() {
  _next_marked_bytes = 0;
  _next_top_at_mark_start = top();
}

inline void HeapRegion::note_end_of_marking() {
  _prev_top_at_mark_start = _next_top_at_mark_start;
  _prev_marked_bytes = _next_marked_bytes;
  _next_marked_bytes = 0;

  assert(_prev_marked_bytes <=
         (size_t) pointer_delta(prev_top_at_mark_start(), bottom()) *
         HeapWordSize, "invariant");
}

inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.