 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
#define SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP

#include "memory/allocation.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtrArray.hpp"

// Snapshot pointer array iterator

// The pointer array contains malloc-ed pointers
class MemPointerIterator : public MemPointerArrayIteratorImpl {
 public:
  MemPointerIterator(MemPointerArray* arr):
    MemPointerArrayIteratorImpl(arr) {
    assert(arr != NULL, "null array");
  }

#ifdef ASSERT
  virtual bool is_dup_pointer(const MemPointer* ptr1,
    const MemPointer* ptr2) const {
    MemPointerRecord* p1 = (MemPointerRecord*)ptr1;
    MemPointerRecord* p2 = (MemPointerRecord*)ptr2;

    if (p1->addr() != p2->addr()) return false;
    if ((p1->flags() & MemPointerRecord::tag_masks) !=
        (p2->flags() & MemPointerRecord::tag_masks)) {
      return false;
      // ... [remainder of MemPointerIterator and the following iterator
      //      declarations are elided in this excerpt] ...
  inline void reset() { _pos = 0; }
#ifdef ASSERT
  virtual bool is_dup_pointer(const MemPointer* ptr1,
    const MemPointer* ptr2) const {
    VMMemRegion* p1 = (VMMemRegion*)ptr1;
    VMMemRegion* p2 = (VMMemRegion*)ptr2;

    if (p1->addr() != p2->addr()) return false;
    if ((p1->flags() & MemPointerRecord::tag_masks) !=
        (p2->flags() & MemPointerRecord::tag_masks)) {
      return false;
    }
    // Multiple commit/uncommit records on the same memory region are
    // legitimate, so only allocation and release records are treated
    // as duplicates.
    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
  }
#endif
};
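
// Illustrative sketch (hypothetical records; only the tag logic above is
// real): two commit records for the same address are not duplicates, while
// two reservation (tag_alloc) records for the same address are.
//
//   VMMemRegion* c1 = ...;  // tag_commit at address A
//   VMMemRegion* c2 = ...;  // tag_commit at address A again
//   itr.is_dup_pointer(c1, c2);  // false: repeated commits are expected
//
//   VMMemRegion* r1 = ...;  // tag_alloc (reserve) at address A
//   VMMemRegion* r2 = ...;  // tag_alloc (reserve) at address A again
//   itr.is_dup_pointer(r1, r2);  // true: a duplicated reservation record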

class MallocRecordIterator : public MemPointerArrayIterator {
 private:
  MemPointerArrayIteratorImpl _itr;

 public:
  MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
  }

  virtual MemPointer* current() const {
#ifdef ASSERT
    MemPointer* cur_rec = _itr.current();
    if (cur_rec != NULL) {
      MemPointer* prev_rec = _itr.peek_prev();
      MemPointer* next_rec = _itr.peek_next();
      assert(prev_rec == NULL || prev_rec->addr() < cur_rec->addr(), "Sorting order");
      assert(next_rec == NULL || next_rec->addr() > cur_rec->addr(), "Sorting order");
    }
#endif
    return _itr.current();
  }

  virtual MemPointer* next() {
    MemPointerRecord* next_rec = (MemPointerRecord*)_itr.next();
    // An arena size record is a special case: its sequence number has to be
    // compared against that of its associated arena record.
    if (next_rec != NULL && next_rec->is_arena_size_record()) {
      MemPointerRecord* prev_rec = (MemPointerRecord*)_itr.peek_prev();
      // If there is an associated arena record, it has to be the previous
      // record because of the sorting order.
      if (prev_rec != NULL && prev_rec->is_arena_record() &&
          next_rec->is_size_record_of_arena(prev_rec)) {
        if (prev_rec->seq() > next_rec->seq()) {
          // Skip this arena size record. Two scenarios:
          // - if the arena record is an allocation record, this early size
          //   record must be left over by a previous arena, and the last
          //   size record should have size = 0;
          // - if the arena record is a deallocation record, this size record
          //   should be its cleanup record, which should also have size = 0.
          // In other words, an arena always resets its size before it goes
          // away (see Arena's destructor).
          assert(next_rec->size() == 0, "size not reset");
          return _itr.next();
        } else {
          assert(prev_rec->is_allocation_record(),
                 "Arena size record ahead of allocation record");
        }
      }
    }
    return next_rec;
  }

  MemPointer* peek_next() const { ShouldNotReachHere(); return NULL; }
  MemPointer* peek_prev() const { ShouldNotReachHere(); return NULL; }
  void remove() { ShouldNotReachHere(); }
  bool insert(MemPointer* ptr) { ShouldNotReachHere(); return false; }
  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
};
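
// Usage sketch (hypothetical; 'staging_malloc_data' stands in for an
// already populated, sorted MemPointerArray): walking the malloc records.
//
//   MallocRecordIterator malloc_itr(staging_malloc_data);
//   for (MemPointerRecord* rec = (MemPointerRecord*)malloc_itr.current();
//        rec != NULL;
//        rec = (MemPointerRecord*)malloc_itr.next()) {
//     // Stale arena size records have already been skipped by next(),
//     // so each record seen here can be processed directly.
//   }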

// Collapse duplicated records. Eliminating duplicated records here is much
// cheaper than doing so during the promotion phase, but it has a limitation:
// it can only eliminate duplicated records within a generation, so there is
// still a chance of seeing duplicated records during promotion.
// We want to keep the record with the higher sequence number, because it
// has the more accurate callsite pc.
class VMRecordIterator : public MemPointerArrayIterator {
 private:
  MemPointerArrayIteratorImpl _itr;

 public:
  VMRecordIterator(MemPointerArray* arr) : _itr(arr) {
    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
    while (next != NULL) {
      assert(cur != NULL, "Sanity check");
      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
             "pre-sort order");

      if (is_duplicated_record(cur, next)) {
        _itr.next();
        next = (MemPointerRecord*)_itr.peek_next();
      } else {
        break;
      }
    }
  }

  virtual MemPointer* current() const {
    return _itr.current();
  }

  // Get the next record, skipping duplicated ones.
  virtual MemPointer* next() {
    MemPointerRecord* cur = (MemPointerRecord*)_itr.next();
    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
    while (next != NULL) {
      assert(cur != NULL, "Sanity check");
      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
             "pre-sort order");

      if (is_duplicated_record(cur, next)) {
        _itr.next();
        cur = next;
        next = (MemPointerRecord*)_itr.peek_next();
      } else {
        break;
      }
    }
    return cur;
  }

  MemPointer* peek_next() const { ShouldNotReachHere(); return NULL; }
  MemPointer* peek_prev() const { ShouldNotReachHere(); return NULL; }
  void remove() { ShouldNotReachHere(); }
  bool insert(MemPointer* ptr) { ShouldNotReachHere(); return false; }
  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }

 private:
  bool is_duplicated_record(MemPointerRecord* p1, MemPointerRecord* p2) const {
    bool ret = (p1->addr() == p2->addr() && p1->size() == p2->size() && p1->flags() == p2->flags());
    assert(!(ret && FLAGS_TO_MEMORY_TYPE(p1->flags()) == mtThreadStack), "dup on stack record");
    return ret;
  }
};
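
// Worked example (record values invented for illustration): given a
// pre-sorted staging array holding two records for the same reservation,
//
//   { addr = 0x1000, size = 4K, flags = reserve, seq = 7 }
//   { addr = 0x1000, size = 4K, flags = reserve, seq = 9 }
//
// is_duplicated_record() is true for the pair, so both the constructor and
// next() advance past the older entry, and the iterator yields only the
// seq = 9 record, whose callsite pc is the more accurate one.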

class StagingArea : public _ValueObj {
 private:
  MemPointerArray* _malloc_data;
  MemPointerArray* _vm_data;

 public:
  StagingArea() : _malloc_data(NULL), _vm_data(NULL) {
    init();
  }

  ~StagingArea() {
    if (_malloc_data != NULL) delete _malloc_data;
    // ... [remainder of StagingArea and the opening of the snapshot class
    //      are elided in this excerpt] ...
  // merge a per-thread memory recorder into the staging area
  bool merge(MemRecorder* rec);
  // promote staged data to the snapshot
  bool promote();

  void wait(long timeout) {
    assert(_lock != NULL, "Just check");
    MonitorLockerEx locker(_lock);
    locker.wait(true, timeout);
  }

  NOT_PRODUCT(void print_snapshot_stats(outputStream* st);)
  NOT_PRODUCT(void check_staging_data();)
  NOT_PRODUCT(void check_malloc_pointers();)
  NOT_PRODUCT(bool has_allocation_record(address addr);)
  // dump all virtual memory pointers in the snapshot
  DEBUG_ONLY(void dump_all_vm_pointers();)

 private:
  // copy a sequenced pointer from src to dest
  void copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
  // assign a sequenced pointer to a non-sequenced pointer
  void assign_pointer(MemPointerRecord* dest, const MemPointerRecord* src);

  bool promote_malloc_records(MemPointerArrayIterator* itr);
  bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
};

#endif // SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
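
// Illustrative flow (a sketch under assumed context: the enclosing snapshot
// class, whose declaration is elided above, is assumed to be MemSnapshot):
// a filled per-thread recorder is merged into the staging area, then the
// staged data is promoted into the snapshot.
//
//   MemSnapshot* snapshot = ...;  // however the snapshot is obtained
//   MemRecorder* rec = ...;       // a filled per-thread recorder
//   if (snapshot->merge(rec)) {   // stage the recorder's records
//     snapshot->promote();        // fold staged data into the snapshot
//   }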