// NOTE(review): This chunk is not compilable C++ as it stands.  It is a
// flattened (whitespace-mangled) dump of HotSpot's VMMemPointerIterator
// methods in which TWO revisions of the same code appear back to back,
// separated by a '|' in the middle of the first physical line.  The numbers
// embedded in the text ("106", "107", ...) are the dump's original source
// line numbers, not code.
//
// First copy (before the '|'):
//   - tail of insert_record()            -- starts before this view; incomplete.
//   - insert_record_after(): wraps the incoming MemPointerRecord in a local
//     VMMemRegionEx, using the callsite-aware (MemPointerRecordEx*) init only
//     when MemTracker::track_callsite() is on, then calls insert_after().
//   - add_reserved_region(): inserts a reservation record; asserts the record
//     neither duplicates nor overlaps the current reserved region.
//   - add_committed_region()             -- cut off mid-while-loop; incomplete.
//
// Second copy (after the '|'): the same methods, except add_reserved_region()
// additionally tolerates an overlap with an existing mtThreadStack region:
// per the in-text comment, an attached JNI thread can exit without detaching,
// leaking its JavaThread object (JDK-8001743 is cited in the text).  Under
// CheckJNICalls that situation trips a guarantee(); otherwise the base/overlap
// asserts are relaxed for mtThreadStack regions only.  The local variable is
// also renamed cur -> reserved_rgn in that copy.
//
// No code below has been altered; only these comment lines were added.
// TODO(review): restore this chunk from version control (it appears to be a
// side-by-side diff render -- confirm against repository history) rather than
// editing the mangled text in place.
106 } 107 return insert(&new_rec); 108 } 109 110 bool VMMemPointerIterator::insert_record_after(MemPointerRecord* rec) { 111 VMMemRegionEx new_rec; 112 assert(rec->is_allocation_record() || rec->is_commit_record(), 113 "Sanity check"); 114 if (MemTracker::track_callsite()) { 115 new_rec.init((MemPointerRecordEx*)rec); 116 } else { 117 new_rec.init(rec); 118 } 119 return insert_after(&new_rec); 120 } 121 122 // we don't consolidate reserved regions, since they may be categorized 123 // in different types. 124 bool VMMemPointerIterator::add_reserved_region(MemPointerRecord* rec) { 125 assert(rec->is_allocation_record(), "Sanity check"); 126 VMMemRegion* cur = (VMMemRegion*)current(); 127 128 // we don't have anything yet 129 if (cur == NULL) { 130 return insert_record(rec); 131 } 132 133 assert(cur->is_reserved_region(), "Sanity check"); 134 // duplicated records 135 if (cur->is_same_region(rec)) { 136 return true; 137 } 138 assert(cur->base() > rec->addr(), "Just check: locate()"); 139 assert(!cur->overlaps_region(rec), "overlapping reserved regions"); 140 return insert_record(rec); 141 } 142 143 // we do consolidate committed regions 144 bool VMMemPointerIterator::add_committed_region(MemPointerRecord* rec) { 145 assert(rec->is_commit_record(), "Sanity check"); 146 VMMemRegion* reserved_rgn = (VMMemRegion*)current(); 147 assert(reserved_rgn->is_reserved_region() && reserved_rgn->contains_region(rec), 148 "Sanity check"); 149 150 // thread's native stack is always marked as "committed", ignore 151 // the "commit" operation for creating stack guard pages 152 if (FLAGS_TO_MEMORY_TYPE(reserved_rgn->flags()) == mtThreadStack && 153 FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) { 154 return true; 155 } 156 157 // if the reserved region has any committed regions 158 VMMemRegion* committed_rgn = (VMMemRegion*)next(); 159 while (committed_rgn != NULL && committed_rgn->is_committed_region()) { | 106 } 107 return insert(&new_rec); 108 } 109 110 bool
// (second revision continues below)
VMMemPointerIterator::insert_record_after(MemPointerRecord* rec) { 111 VMMemRegionEx new_rec; 112 assert(rec->is_allocation_record() || rec->is_commit_record(), 113 "Sanity check"); 114 if (MemTracker::track_callsite()) { 115 new_rec.init((MemPointerRecordEx*)rec); 116 } else { 117 new_rec.init(rec); 118 } 119 return insert_after(&new_rec); 120 } 121 122 // we don't consolidate reserved regions, since they may be categorized 123 // in different types. 124 bool VMMemPointerIterator::add_reserved_region(MemPointerRecord* rec) { 125 assert(rec->is_allocation_record(), "Sanity check"); 126 VMMemRegion* reserved_rgn = (VMMemRegion*)current(); 127 128 // we don't have anything yet 129 if (reserved_rgn == NULL) { 130 return insert_record(rec); 131 } 132 133 assert(reserved_rgn->is_reserved_region(), "Sanity check"); 134 // duplicated records 135 if (reserved_rgn->is_same_region(rec)) { 136 return true; 137 } 138 // an attached JNI thread can exit without detaching the thread, that results 139 // JVM to leak JavaThread object (JDK-8001743) 140 if (CheckJNICalls) { 141 guarantee(FLAGS_TO_MEMORY_TYPE(reserved_rgn->flags()) != mtThreadStack || 142 !reserved_rgn->overlaps_region(rec), 143 "Attached JNI thread exited without being detached"); 144 } 145 // otherwise, we should not have overlapping reserved regions 146 assert(FLAGS_TO_MEMORY_TYPE(reserved_rgn->flags()) == mtThreadStack || 147 reserved_rgn->base() > rec->addr(), "Just check: locate()"); 148 assert(FLAGS_TO_MEMORY_TYPE(reserved_rgn->flags()) == mtThreadStack || 149 !reserved_rgn->overlaps_region(rec), "overlapping reserved regions"); 150 151 return insert_record(rec); 152 } 153 154 // we do consolidate committed regions 155 bool VMMemPointerIterator::add_committed_region(MemPointerRecord* rec) { 156 assert(rec->is_commit_record(), "Sanity check"); 157 VMMemRegion* reserved_rgn = (VMMemRegion*)current(); 158 assert(reserved_rgn->is_reserved_region() && reserved_rgn->contains_region(rec), 159 "Sanity check"); 160 161 
// (second revision's add_committed_region() continues; truncated at end of view)
// thread's native stack is always marked as "committed", ignore 162 // the "commit" operation for creating stack guard pages 163 if (FLAGS_TO_MEMORY_TYPE(reserved_rgn->flags()) == mtThreadStack && 164 FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) { 165 return true; 166 } 167 168 // if the reserved region has any committed regions 169 VMMemRegion* committed_rgn = (VMMemRegion*)next(); 170 while (committed_rgn != NULL && committed_rgn->is_committed_region()) { |