 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP

#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "memory/space.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"

// This version requires locking.
inline HeapWord* G1OffsetTableContigSpace::allocate_impl(size_t size,
                                                         HeapWord* const end_value) {
  HeapWord* obj = top();
  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* G1OffsetTableContigSpace::par_allocate_impl(size_t size,
                                                             HeapWord* const end_value) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end_value, obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result is the value of top that the CAS observed: if it equals obj,
      // the exchange succeeded and we own [obj, new_top); otherwise another
      // thread advanced top first, so loop and retry with the new top.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = allocate_impl(size, end());
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock. Therefore, it is best
// if this is used only for larger LAB allocations.
inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  return allocate(size);
}

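// A minimal usage sketch (illustrative only, not part of this file): callers
// choose between the BOT-updating entry points above and the BOT-skipping
// ones declared further below based on the region type, since only young
// regions may skip block-offset-table maintenance. "hr" and "word_size" are
// assumed to exist in the caller.
//
//   HeapWord* buf;
//   if (hr->is_young()) {
//     buf = hr->par_allocate_no_bot_updates(word_size);  // lock-free CAS path
//   } else {
//     buf = hr->par_allocate(word_size);                 // takes _par_alloc_lock, updates BOT
//   }
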
inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
  return _offsets.block_start(p);
}

inline HeapWord*
G1OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start_const(p);
}

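// Reports whether the block starting at p is a parseable object. Without
// concurrent class unloading every block below top() is an object; with
// class unloading enabled a dead object may have had its class unloaded,
// so liveness against the previous marking is used instead.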
inline bool
HeapRegion::block_is_obj(const HeapWord* p) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  if (ClassUnloadingWithConcurrentMark) {
    return !g1h->is_obj_dead(oop(p), this);
  }
  return p < top();
}

inline size_t
HeapRegion::block_size(const HeapWord *addr) const {
  if (addr == top()) {
    return pointer_delta(end(), addr);
  }

  if (block_is_obj(addr)) {
    return oop(addr)->size();
  }

  assert(ClassUnloadingWithConcurrentMark,
         err_msg("All blocks should be objects if G1 Class Unloading isn't used. "
                 "HR: ["PTR_FORMAT", "PTR_FORMAT", "PTR_FORMAT") "
                 "addr: " PTR_FORMAT,
                 p2i(bottom()), p2i(top()), p2i(end()), p2i(addr)));

  // A dead object in an old region may have a dead class, so we cannot
  // safely read its size from the object itself. Instead, find the next
  // live (marked) word and return the distance to it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
      getNextMarkedWordAddress(addr, prev_top_at_mark_start());

  assert(next > addr, "must get the next live object");
  return pointer_delta(next, addr);
}

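// A minimal sketch of the intended use (illustrative only, with "hr" and a
// hypothetical "process" callback assumed): heap walkers step through a
// region block by block, handing only parseable objects to the callback and
// using block_size() to skip over dead ranges.
//
//   HeapWord* cur = hr->bottom();
//   while (cur < hr->top()) {
//     if (hr->block_is_obj(cur)) {
//       process(oop(cur));
//     }
//     cur += hr->block_size(cur);
//   }
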
inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return par_allocate_impl(word_size, end());
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return allocate_impl(word_size, end());
}

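// The two note_*_of_marking() routines below maintain the marking snapshot
// boundaries: when a concurrent cycle starts, the current top becomes the
// "next" top-at-mark-start (objects allocated above it are treated as
// implicitly live), and when marking finishes the "next" values are promoted
// to the "prev" ones consulted by is_obj_dead() and block_size().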
inline void HeapRegion::note_start_of_marking() {
  _next_marked_bytes = 0;
  _next_top_at_mark_start = top();
}

inline void HeapRegion::note_end_of_marking() {
  _prev_top_at_mark_start = _next_top_at_mark_start;
  _prev_marked_bytes = _next_marked_bytes;
  _next_marked_bytes = 0;

  assert(_prev_marked_bytes <=
         (size_t) pointer_delta(prev_top_at_mark_start(), bottom()) *
         HeapWordSize, "invariant");
}

inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
|
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP

#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "memory/space.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"

// This version requires locking.
inline HeapWord* G1OffsetTableContigSpace::allocate_impl(size_t min_word_size,
                                                         size_t& size,
                                                         HeapWord* const end_value) {
  HeapWord* obj = top();
  size_t available = pointer_delta(end_value, obj);
  size_t want_to_allocate = MIN2(available, size);
  if (want_to_allocate >= min_word_size) {
    HeapWord* new_top = obj + want_to_allocate;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    size = want_to_allocate;
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* G1OffsetTableContigSpace::par_allocate_impl(size_t min_word_size,
                                                             size_t& size,
                                                             HeapWord* const end_value) {
  do {
    HeapWord* obj = top();
    size_t available = pointer_delta(end_value, obj);
    size_t want_to_allocate = MIN2(available, size);
    if (want_to_allocate >= min_word_size) {
      HeapWord* new_top = obj + want_to_allocate;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result is the value of top that the CAS observed: if it equals obj,
      // the exchange succeeded and we own [obj, new_top); otherwise another
      // thread advanced top first, so loop and retry with the new top.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        size = want_to_allocate;
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

inline HeapWord* G1OffsetTableContigSpace::allocate(size_t min_word_size, size_t& size) {
  HeapWord* res = allocate_impl(min_word_size, size, end());
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock. Therefore, it is best
// if this is used only for larger LAB allocations.
inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t min_word_size, size_t& size) {
  MutexLocker x(&_par_alloc_lock);
  return allocate(min_word_size, size);
}

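// A minimal usage sketch (illustrative only, not part of this file): the
// two-size interface lets a caller ask for up to "size" words but accept any
// amount of at least "min_word_size" words, with the actual amount returned
// through the in/out "size" argument. The names "hr", "desired_plab_words",
// "min_plab_words" and "install_buffer" below are assumptions made for the
// example.
//
//   size_t size = desired_plab_words;              // in: desired, out: actual
//   HeapWord* buf = hr->par_allocate(min_plab_words, size);
//   if (buf != NULL) {
//     install_buffer(buf, size);                   // use however many words we got
//   }
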
inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
  return _offsets.block_start(p);
}

inline HeapWord*
G1OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start_const(p);
}

inline bool
HeapRegion::block_is_obj(const HeapWord* p) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  if (ClassUnloadingWithConcurrentMark) {
    return !g1h->is_obj_dead(oop(p), this);
  }
  return p < top();
}

inline size_t
HeapRegion::block_size(const HeapWord *addr) const {
  if (addr == top()) {
    return pointer_delta(end(), addr);
  }

  if (block_is_obj(addr)) {
    return oop(addr)->size();
  }

  assert(ClassUnloadingWithConcurrentMark,
         err_msg("All blocks should be objects if G1 Class Unloading isn't used. "
                 "HR: ["PTR_FORMAT", "PTR_FORMAT", "PTR_FORMAT") "
                 "addr: " PTR_FORMAT,
                 p2i(bottom()), p2i(top()), p2i(end()), p2i(addr)));

  // A dead object in an old region may have a dead class, so we cannot
  // safely read its size from the object itself. Instead, find the next
  // live (marked) word and return the distance to it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
      getNextMarkedWordAddress(addr, prev_top_at_mark_start());

  assert(next > addr, "must get the next live object");
  return pointer_delta(next, addr);
}

inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t min_word_size, size_t& word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return par_allocate_impl(min_word_size, word_size, end());
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t min_word_size, size_t& word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return allocate_impl(min_word_size, word_size, end());
}

inline void HeapRegion::note_start_of_marking() {
  _next_marked_bytes = 0;
  _next_top_at_mark_start = top();
}

inline void HeapRegion::note_end_of_marking() {
  _prev_top_at_mark_start = _next_top_at_mark_start;
  _prev_marked_bytes = _next_marked_bytes;
  _next_marked_bytes = 0;

  assert(_prev_marked_bytes <=
         (size_t) pointer_delta(prev_top_at_mark_start(), bottom()) *
         HeapWordSize, "invariant");
}

inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
|