 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP

#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "memory/space.hpp"
#include "runtime/atomic.inline.hpp"

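// This version is lock-free: racing allocators advance _top with a
// compare-and-swap and retry on failure.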
inline HeapWord* G1OffsetTableContigSpace::cas_allocate_inner(size_t size) {
  HeapWord* obj = top();
  do {
    if (pointer_delta(end(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, &_top, obj);
      // result can be one of two:
      //   the old top value: the exchange succeeded;
      //   otherwise: the current value of top, as installed by a competing
      //   thread, is returned, and we retry the CAS with it.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
      obj = result;
    } else {
      break;
    }
  } while (true);
  return NULL;
}

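// This version requires locking: callers must either hold a lock or
// otherwise ensure that only one thread allocates at a time.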
inline HeapWord* G1OffsetTableContigSpace::allocate_inner(size_t size) {
  HeapWord* obj = top();
  if (pointer_delta(end(), obj) >= size) {
    HeapWord* new_top = obj + size;
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    set_top(new_top);
    return obj;
  }
  return NULL;
}

inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = allocate_inner(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock. Therefore, this is best
// used for larger LAB allocations only.
inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  return allocate(size);
}

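// Find the start of the block (object) containing "p" by consulting the
// block offset table.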
inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
  return _offsets.block_start(p);
}

inline HeapWord*
G1OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start_const(p);
}

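// Every block below top() is the start of an object; only the unallocated
// tail of the region is not.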
inline bool
HeapRegion::block_is_obj(const HeapWord* p) const {
  return p < top();
}

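// The size of the block starting at "addr": the object's size when below
// top(), otherwise the single unallocated block [top(), end()).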
inline size_t
HeapRegion::block_size(const HeapWord *addr) const {
  const HeapWord* current_top = top();
  if (addr < current_top) {
    return oop(addr)->size();
  } else {
    assert(addr == current_top, "just checking");
    return pointer_delta(end(), addr);
  }
}

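// Allocation that skips block-offset-table ("BOT") updates. This is safe
// only for young regions: they are always collected in their entirety, so
// G1 never needs the BOT to locate object starts within them.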
inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return cas_allocate_inner(word_size);
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return allocate_inner(word_size);
}

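// Marking bookkeeping. At the start of a cycle, "next TAMS"
// (top-at-mark-start) records the current top(); objects allocated above
// it during marking are treated as implicitly live.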
inline void HeapRegion::note_start_of_marking() {
  _next_marked_bytes = 0;
  _next_top_at_mark_start = top();
}

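// At the end of a cycle, the "next" marking data becomes the "prev" data;
// the marked bytes can never exceed the space below prev TAMS.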
inline void HeapRegion::note_end_of_marking() {
  _prev_top_at_mark_start = _next_top_at_mark_start;
  _prev_marked_bytes = _next_marked_bytes;
  _next_marked_bytes = 0;

  assert(_prev_marked_bytes <=
         (size_t) pointer_delta(prev_top_at_mark_start(), bottom()) *
         HeapWordSize, "invariant");
}

inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.