src/share/vm/services/memSnapshot.hpp

*** 29,39 ****
  #include "runtime/mutex.hpp"
  #include "runtime/mutexLocker.hpp"
  #include "services/memBaseline.hpp"
  #include "services/memPtrArray.hpp"
  
- 
  // Snapshot pointer array iterator
  // The pointer array contains malloc-ed pointers
  class MemPointerIterator : public MemPointerArrayIteratorImpl {
   public:
  
--- 29,38 ----
*** 163,205 ****
    }
  #endif
  };
  
  class MallocRecordIterator : public MemPointerArrayIterator {
!  protected:
    MemPointerArrayIteratorImpl  _itr;
  
   public:
    MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
    }
  
    virtual MemPointer* current() const {
!     MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
!     assert(cur == NULL || !cur->is_vm_pointer(), "seek error");
!     MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
!     if (next == NULL || next->addr() != cur->addr()) {
!       return cur;
!     } else {
!       assert(!cur->is_vm_pointer(), "Sanity check");
!       assert(cur->is_allocation_record() && next->is_deallocation_record(),
!          "sorting order");
!       assert(cur->seq() != next->seq(), "Sanity check");
!       return cur->seq() > next->seq() ? cur : next;
      }
    }
- 
    virtual MemPointer* next() {
!     MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
!     assert(cur == NULL || !cur->is_vm_pointer(), "Sanity check");
!     MemPointerRecord* next = (MemPointerRecord*)_itr.next();
!     if (next == NULL) {
!       return NULL;
      }
-     if (cur->addr() == next->addr()) {
-       next = (MemPointerRecord*)_itr.next();
      }
!     return current();
    }
  
    MemPointer* peek_next() const      { ShouldNotReachHere(); return NULL; }
    MemPointer* peek_prev() const      { ShouldNotReachHere(); return NULL; }
    void remove()                      { ShouldNotReachHere(); }
--- 162,221 ----
    }
  #endif
  };
  
  class MallocRecordIterator : public MemPointerArrayIterator {
!  private:
    MemPointerArrayIteratorImpl  _itr;
+ 
+  public:
    MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
    }
  
    virtual MemPointer* current() const {
! #ifdef ASSERT
!     MemPointer* cur_rec = _itr.current();
!     if (cur_rec != NULL) {
!       MemPointer* prev_rec = _itr.peek_prev();
!       MemPointer* next_rec = _itr.peek_next();
!       assert(prev_rec == NULL || prev_rec->addr() < cur_rec->addr(), "Sorting order");
!       assert(next_rec == NULL || next_rec->addr() > cur_rec->addr(), "Sorting order");
      }
+ #endif
+     return _itr.current();
    }
  
    virtual MemPointer* next() {
!     MemPointerRecord* next_rec = (MemPointerRecord*)_itr.next();
!     // An arena size record is a special case: its sequence number has to
!     // be compared against its associated arena record.
!     if (next_rec != NULL && next_rec->is_arena_size_record()) {
!       MemPointerRecord* prev_rec = (MemPointerRecord*)_itr.peek_prev();
!       // if there is an associated arena record, it has to be the previous
!       // record because of the sorting order
!       if (prev_rec != NULL && prev_rec->is_arena_record() &&
!           next_rec->is_size_record_of_arena(prev_rec)) {
!         if (prev_rec->seq() > next_rec->seq()) {
!           // Skip this arena size record. Two scenarios:
!           //   - if the arena record is an allocation record, this early
!           //     size record must be left over from a previous arena,
!           //     and the last size record should have size = 0.
!           //   - if the arena record is a deallocation record, this
!           //     size record should be its cleanup record, which should
!           //     also have size = 0. In other words, an arena always
!           //     resets its size before it is gone (see Arena's destructor).
!           assert(next_rec->size() == 0, "size not reset");
!           return _itr.next();
!         } else {
!           assert(prev_rec->is_allocation_record(),
!             "Arena size record ahead of allocation record");
!         }
!       }
!     }
!     return next_rec;
    }
  
    MemPointer* peek_next() const      { ShouldNotReachHere(); return NULL; }
    MemPointer* peek_prev() const      { ShouldNotReachHere(); return NULL; }
    void remove()                      { ShouldNotReachHere(); }
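The rewritten next() above encodes a subtle ordering rule: an arena size record is dropped only when its sequence number is older than the arena record it belongs to, in which case its size must already have been reset to zero. The following standalone sketch models just that rule; Rec, is_size_record_of, and next_record are invented stand-ins for illustration, and the addr + 1 pairing convention is an assumption of the sketch, not the real hotspot record layout.

    // Minimal standalone model of the skip rule in MallocRecordIterator::next().
    // All names here (Rec, is_size_record_of, next_record) are invented
    // stand-ins, not the real hotspot classes.
    #include <cassert>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct Rec {
      unsigned long addr;          // record address
      size_t        size;          // recorded size
      int           seq;           // global sequence number
      bool          is_arena;      // arena allocation/deallocation record
      bool          is_arena_size; // companion size record of an arena
    };

    // Pairing convention is simplified here: a size record at addr + 1 is
    // assumed to belong to the arena record at addr.
    static bool is_size_record_of(const Rec& size_rec, const Rec& arena_rec) {
      return size_rec.is_arena_size && arena_rec.is_arena &&
             size_rec.addr == arena_rec.addr + 1;
    }

    // Returns the index of the record after recs[i], skipping an arena size
    // record that is older (lower seq) than its associated arena record.
    static size_t next_record(const std::vector<Rec>& recs, size_t i) {
      size_t n = i + 1;
      if (n < recs.size() && is_size_record_of(recs[n], recs[i]) &&
          recs[i].seq > recs[n].seq) {
        // Stale size record: the arena has already reset its size to 0.
        assert(recs[n].size == 0 && "size not reset");
        return n + 1;
      }
      return n;
    }

    int main() {
      std::vector<Rec> recs = {
        {0x1000, 64, 7, true,  false},  // arena record, seq 7
        {0x1001,  0, 3, false, true },  // leftover size record, seq 3: skipped
        {0x2000, 32, 9, false, false},  // ordinary malloc record
      };
      size_t i = next_record(recs, 0);
      std::printf("next record addr = 0x%lx\n", recs[i].addr);  // prints 0x2000
      return 0;
    }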
*** 211,223 ****
  // cheaper than during promotion phase. However, it does have limitation - it
  // can only eliminate duplicated records within the generation, there are
  // still chances seeing duplicated records during promotion.
  // We want to use the record with higher sequence number, because it has
  // more accurate callsite pc.
! class VMRecordIterator : public MallocRecordIterator {
   public:
!   VMRecordIterator(MemPointerArray* arr) : MallocRecordIterator(arr) {
      MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
      MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
      while (next != NULL) {
        assert(cur != NULL, "Sanity check");
        assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
--- 227,242 ----
  // cheaper than during promotion phase. However, it does have limitation - it
  // can only eliminate duplicated records within the generation, there are
  // still chances seeing duplicated records during promotion.
  // We want to use the record with higher sequence number, because it has
  // more accurate callsite pc.
! class VMRecordIterator : public MemPointerArrayIterator {
!  private:
!   MemPointerArrayIteratorImpl  _itr;
! 
   public:
!   VMRecordIterator(MemPointerArray* arr) : _itr(arr) {
      MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
      MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
      while (next != NULL) {
        assert(cur != NULL, "Sanity check");
        assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
*** 254,263 ****
--- 273,288 ----
        }
      }
      return cur;
    }
+   MemPointer* peek_next() const      { ShouldNotReachHere(); return NULL; }
+   MemPointer* peek_prev() const      { ShouldNotReachHere(); return NULL; }
+   void remove()                      { ShouldNotReachHere(); }
+   bool insert(MemPointer* ptr)       { ShouldNotReachHere(); return false; }
+   bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
+ 
   private:
    bool is_duplicated_record(MemPointerRecord* p1, MemPointerRecord* p2) const {
      bool ret = (p1->addr() == p2->addr() && p1->size() == p2->size() &&
        p1->flags() == p2->flags());
      assert(!(ret && FLAGS_TO_MEMORY_TYPE(p1->flags()) == mtThreadStack), "dup on stack record");
      return ret;
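The VMRecordIterator change above makes the class a standalone iterator while keeping its duplicate elimination: among pre-sorted records with identical (addr, size, flags), the later record wins because its higher sequence number carries the more accurate callsite pc. Below is a standalone sketch of that policy; VMRec, is_duplicate, and skip_duplicates are invented stand-ins, not hotspot APIs.

    // Minimal standalone model of VMRecordIterator's duplicate elimination.
    #include <cassert>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct VMRec {
      unsigned long addr;
      size_t        size;
      int           flags;
      int           seq;  // later records have higher seq and a better callsite pc
    };

    static bool is_duplicate(const VMRec& a, const VMRec& b) {
      return a.addr == b.addr && a.size == b.size && a.flags == b.flags;
    }

    // Records are pre-sorted so duplicates are adjacent and seq is increasing;
    // advancing to the last record of a run keeps the most accurate one.
    static size_t skip_duplicates(const std::vector<VMRec>& recs, size_t i) {
      while (i + 1 < recs.size() && is_duplicate(recs[i], recs[i + 1])) {
        assert(recs[i + 1].seq > recs[i].seq && "pre-sorted order");
        i++;
      }
      return i;
    }

    int main() {
      std::vector<VMRec> recs = {
        {0x7000, 4096, 1, 10},
        {0x7000, 4096, 1, 12},  // duplicate of the previous record, newer seq
        {0x8000, 8192, 2, 13},
      };
      size_t i = skip_duplicates(recs, 0);
      std::printf("kept record seq = %d\n", recs[i].seq);  // prints 12
      return 0;
    }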
*** 346,357 ****
    NOT_PRODUCT(bool has_allocation_record(address addr);)
    // dump all virtual memory pointers in snapshot
    DEBUG_ONLY( void dump_all_vm_pointers();)
  
   private:
!   // copy pointer data from src to dest
!   void copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
  
    bool promote_malloc_records(MemPointerArrayIterator* itr);
    bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
  };
--- 371,384 ----
    NOT_PRODUCT(bool has_allocation_record(address addr);)
    // dump all virtual memory pointers in snapshot
    DEBUG_ONLY( void dump_all_vm_pointers();)
  
   private:
!   // copy a sequenced pointer from src to dest
!   void copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
!   // assign a sequenced pointer to a non-sequenced pointer
!   void assign_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
  
    bool promote_malloc_records(MemPointerArrayIterator* itr);
    bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
  };
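The split of copy_pointer into copy_seq_pointer and assign_pointer mirrors the two kinds of records its own comments name: sequenced pointers (in a generation, carrying a sequence number) and non-sequenced pointers (in the snapshot). A minimal sketch of the distinction follows, using invented stand-in types (PtrRec, SeqPtrRec) rather than the real MemPointerRecord hierarchy.

    // Sketch of the two copy flavors used during promotion; simplified
    // stand-in types, not the hotspot classes.
    #include <cstddef>
    #include <cstdio>

    struct PtrRec {               // non-sequenced record (snapshot side)
      unsigned long addr;
      size_t        size;
    };

    struct SeqPtrRec : PtrRec {   // sequenced record (generation side)
      int seq;
    };

    // Analogue of copy_seq_pointer: both sides are sequenced, so the
    // sequence number travels with the payload.
    static void copy_seq(SeqPtrRec* dest, const SeqPtrRec* src) {
      *dest = *src;
    }

    // Analogue of assign_pointer: the destination has no sequence slot, so
    // only the payload is transferred and the sequence number is dropped.
    static void assign(PtrRec* dest, const SeqPtrRec* src) {
      dest->addr = src->addr;
      dest->size = src->size;
    }

    int main() {
      SeqPtrRec src;
      src.addr = 0x1000; src.size = 64; src.seq = 42;
      SeqPtrRec a;
      PtrRec b;
      copy_seq(&a, &src);
      assign(&b, &src);
      std::printf("sequenced copy keeps seq=%d; snapshot record has none\n", a.seq);
      return 0;
    }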