19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "memory/allocation.inline.hpp"
27 #include "oops/constantPool.hpp"
28 #include "oops/oop.inline.hpp"
29 #include "runtime/atomic.inline.hpp"
30 #include "runtime/handles.inline.hpp"
31 #include "runtime/thread.inline.hpp"
32
33 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
34
35 #ifdef ASSERT
36 oop* HandleArea::allocate_handle(oop obj) {
37 assert(_handle_mark_nesting > 1, "memory leak: allocating handle outside HandleMark");
38 assert(_no_handle_mark_nesting == 0, "allocating handle inside NoHandleMark");
39 assert(obj->is_oop(), "not an oop: " INTPTR_FORMAT, (intptr_t*) obj);
40 return real_allocate_handle(obj);
41 }
42
43 Handle::Handle(Thread* thread, oop obj) {
44 assert(thread == Thread::current(), "sanity check");
45 if (obj == NULL) {
46 _handle = NULL;
47 } else {
48 _handle = thread->handle_area()->allocate_handle(obj);
49 }
50 }
51
52 #endif
53
54 static uintx chunk_oops_do(OopClosure* f, Chunk* chunk, char* chunk_top) {
55 oop* bottom = (oop*) chunk->bottom();
56 oop* top = (oop*) chunk_top;
57 uintx handles_visited = top - bottom;
58 assert(top >= bottom && top <= (oop*) chunk->top(), "just checking");
59 // during GC phase 3, a handle may be a forward pointer that
68 return handles_visited;
69 }
70
71 // Used for debugging handle allocation.
72 NOT_PRODUCT(jint _nof_handlemarks = 0;)
73
74 void HandleArea::oops_do(OopClosure* f) {
75 uintx handles_visited = 0;
76 // First handle the current chunk. It is filled to the high water mark.
77 handles_visited += chunk_oops_do(f, _chunk, _hwm);
78 // Then handle all previous chunks. They are completely filled.
79 Chunk* k = _first;
80 while(k != _chunk) {
81 handles_visited += chunk_oops_do(f, k, k->top());
82 k = k->next();
83 }
84
85 // The thread local handle areas should not get very large
86 if (TraceHandleAllocation && (size_t)handles_visited > TotalHandleAllocationLimit) {
87 #ifdef ASSERT
88 warning("%d: Visited in HandleMark : %d",
89 _nof_handlemarks, handles_visited);
90 #else
91 warning("Visited in HandleMark : %d", handles_visited);
92 #endif
93 }
94 if (_prev != NULL) _prev->oops_do(f);
95 }
96
97 void HandleMark::initialize(Thread* thread) {
98 _thread = thread;
99 // Save area
100 _area = thread->handle_area();
101 // Save current top
102 _chunk = _area->_chunk;
103 _hwm = _area->_hwm;
104 _max = _area->_max;
105 _size_in_bytes = _area->_size_in_bytes;
106 debug_only(_area->_handle_mark_nesting++);
107 assert(_area->_handle_mark_nesting > 0, "must stack allocate HandleMarks");
108 debug_only(Atomic::inc(&_nof_handlemarks);)
109
110 // Link this in the thread
111 set_previous_handle_mark(thread->last_handle_mark());
120 debug_only(area->_handle_mark_nesting--);
121
122 // Debug code to trace the number of handles allocated per mark.
123 #ifdef ASSERT
124 if (TraceHandleAllocation) {
125 size_t handles = 0;
126 Chunk *c = _chunk->next();
127 if (c == NULL) {
128 handles = area->_hwm - _hwm; // no new chunk allocated
129 } else {
130 handles = _max - _hwm; // add rest in first chunk
131 while(c != NULL) {
132 handles += c->length();
133 c = c->next();
134 }
135 handles -= area->_max - area->_hwm; // adjust for last trunk not full
136 }
137 handles /= sizeof(void *); // Adjust for size of a handle
138 if (handles > HandleAllocationLimit) {
139 // Note: _nof_handlemarks is only set in debug mode
140 warning("%d: Allocated in HandleMark : %d", _nof_handlemarks, handles);
141 }
142
143 tty->print_cr("Handles %d", handles);
144 }
145 #endif
146
147 // Delete later chunks
148 if( _chunk->next() ) {
149 // reset arena size before delete chunks. Otherwise, the total
150 // arena size could exceed total chunk size
151 assert(area->size_in_bytes() > size_in_bytes(), "Sanity check");
152 area->set_size_in_bytes(size_in_bytes());
153 _chunk->next_chop();
154 } else {
155 assert(area->size_in_bytes() == size_in_bytes(), "Sanity check");
156 }
157 // Roll back arena to saved top markers
158 area->_chunk = _chunk;
159 area->_hwm = _hwm;
160 area->_max = _max;
161 #ifdef ASSERT
162 // clear out first chunk (to detect allocation bugs)
163 if (ZapVMHandleArea) {
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "memory/allocation.inline.hpp"
27 #include "oops/constantPool.hpp"
28 #include "oops/oop.inline.hpp"
29 #include "runtime/atomic.inline.hpp"
30 #include "runtime/handles.inline.hpp"
31 #include "runtime/thread.inline.hpp"
32
33 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
34
35 #ifdef ASSERT
// Debug-only handle allocation: before delegating to real_allocate_handle(),
// verify that a HandleMark is active on this area (nesting must exceed 1 —
// presumably level 1 is a base mark, confirm), that no NoHandleMark scope
// forbids allocation, and that obj is a well-formed oop. p2i() converts the
// pointer to the integer type expected by INTPTR_FORMAT.
36 oop* HandleArea::allocate_handle(oop obj) {
37 assert(_handle_mark_nesting > 1, "memory leak: allocating handle outside HandleMark");
38 assert(_no_handle_mark_nesting == 0, "allocating handle inside NoHandleMark");
39 assert(obj->is_oop(), "not an oop: " INTPTR_FORMAT, p2i(obj));
40 return real_allocate_handle(obj);
41 }
42
43 Handle::Handle(Thread* thread, oop obj) {
44 assert(thread == Thread::current(), "sanity check");
45 if (obj == NULL) {
46 _handle = NULL;
47 } else {
48 _handle = thread->handle_area()->allocate_handle(obj);
49 }
50 }
51
52 #endif
53
54 static uintx chunk_oops_do(OopClosure* f, Chunk* chunk, char* chunk_top) {
55 oop* bottom = (oop*) chunk->bottom();
56 oop* top = (oop*) chunk_top;
57 uintx handles_visited = top - bottom;
58 assert(top >= bottom && top <= (oop*) chunk->top(), "just checking");
59 // during GC phase 3, a handle may be a forward pointer that
68 return handles_visited;
69 }
70
71 // Used for debugging handle allocation.
72 NOT_PRODUCT(jint _nof_handlemarks = 0;)
73
74 void HandleArea::oops_do(OopClosure* f) {
75 uintx handles_visited = 0;
76 // First handle the current chunk. It is filled to the high water mark.
77 handles_visited += chunk_oops_do(f, _chunk, _hwm);
78 // Then handle all previous chunks. They are completely filled.
79 Chunk* k = _first;
80 while(k != _chunk) {
81 handles_visited += chunk_oops_do(f, k, k->top());
82 k = k->next();
83 }
84
85 // The thread local handle areas should not get very large
86 if (TraceHandleAllocation && (size_t)handles_visited > TotalHandleAllocationLimit) {
87 #ifdef ASSERT
88 warning("%d: Visited in HandleMark : " SIZE_FORMAT, _nof_handlemarks, handles_visited);
89 #else
90 warning("Visited in HandleMark : " SIZE_FORMAT, handles_visited);
91 #endif
92 }
93 if (_prev != NULL) _prev->oops_do(f);
94 }
95
96 void HandleMark::initialize(Thread* thread) {
97 _thread = thread;
98 // Save area
99 _area = thread->handle_area();
100 // Save current top
101 _chunk = _area->_chunk;
102 _hwm = _area->_hwm;
103 _max = _area->_max;
104 _size_in_bytes = _area->_size_in_bytes;
105 debug_only(_area->_handle_mark_nesting++);
106 assert(_area->_handle_mark_nesting > 0, "must stack allocate HandleMarks");
107 debug_only(Atomic::inc(&_nof_handlemarks);)
108
109 // Link this in the thread
110 set_previous_handle_mark(thread->last_handle_mark());
119 debug_only(area->_handle_mark_nesting--);
120
121 // Debug code to trace the number of handles allocated per mark.
122 #ifdef ASSERT
123 if (TraceHandleAllocation) {
124 size_t handles = 0;
125 Chunk *c = _chunk->next();
126 if (c == NULL) {
127 handles = area->_hwm - _hwm; // no new chunk allocated
128 } else {
129 handles = _max - _hwm; // add rest in first chunk
130 while(c != NULL) {
131 handles += c->length();
132 c = c->next();
133 }
134 handles -= area->_max - area->_hwm; // adjust for last trunk not full
135 }
136 handles /= sizeof(void *); // Adjust for size of a handle
137 if (handles > HandleAllocationLimit) {
138 // Note: _nof_handlemarks is only set in debug mode
139 warning("%d: Allocated in HandleMark : " SIZE_FORMAT, _nof_handlemarks, handles);
140 }
141
142 tty->print_cr("Handles " SIZE_FORMAT, handles);
143 }
144 #endif
145
146 // Delete later chunks
147 if( _chunk->next() ) {
148 // reset arena size before delete chunks. Otherwise, the total
149 // arena size could exceed total chunk size
150 assert(area->size_in_bytes() > size_in_bytes(), "Sanity check");
151 area->set_size_in_bytes(size_in_bytes());
152 _chunk->next_chop();
153 } else {
154 assert(area->size_in_bytes() == size_in_bytes(), "Sanity check");
155 }
156 // Roll back arena to saved top markers
157 area->_chunk = _chunk;
158 area->_hwm = _hwm;
159 area->_max = _max;
160 #ifdef ASSERT
161 // clear out first chunk (to detect allocation bugs)
162 if (ZapVMHandleArea) {
|