128 // Release the Heap_lock first.
129 SharedHeap* sh = SharedHeap::heap();
130 if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = false;
131 Heap_lock->unlock();
132 release_and_notify_pending_list_lock();
133 }
134
135 bool VM_GC_HeapInspection::doit_prologue() {
136 if (Universe::heap()->supports_heap_inspection()) {
137 return VM_GC_Operation::doit_prologue();
138 } else {
139 return false;
140 }
141 }
142
// Never skip: doit_prologue() already refused heaps that do not support
// heap inspection, so the assert merely re-checks that invariant.
bool VM_GC_HeapInspection::skip_operation() const {
  assert(Universe::heap()->supports_heap_inspection(), "huh?");
  return false;
}
147
148 void VM_GC_HeapInspection::doit() {
149 HandleMark hm;
150 CollectedHeap* ch = Universe::heap();
151 ch->ensure_parsability(false); // must happen, even if collection does
152 // not happen (e.g. due to GC_locker)
153 if (_full_gc) {
154 // The collection attempt below would be skipped anyway if
155 // the gc locker is held. The following dump may then be a tad
156 // misleading to someone expecting only live objects to show
157 // up in the dump (see CR 6944195). Just issue a suitable warning
158 // in that case and do not attempt to do a collection.
159 // The latter is a subtle point, because even a failed attempt
160 // to GC will, in fact, induce one in the future, which we
161 // probably want to avoid in this case because the GC that we may
162 // be about to attempt holds value for us only
163 // if it happens now and not if it happens in the eventual
164 // future.
165 if (GC_locker::is_active()) {
166 warning("GC locker is held; pre-dump GC was skipped");
167 } else {
168 ch->collect_as_vm_thread(GCCause::_heap_inspection);
169 }
170 }
171 HeapInspection inspect(_csv_format, _print_help, _print_class_stats,
172 _columns);
173 inspect.heap_inspection(_out, _need_prologue /* need_prologue */);
174 }
175
176
177 void VM_GenCollectForAllocation::doit() {
178 SvcGCMarker sgcm(SvcGCMarker::MINOR);
179
180 GenCollectedHeap* gch = GenCollectedHeap::heap();
181 GCCauseSetter gccs(gch, _gc_cause);
182 _res = gch->satisfy_failed_allocation(_size, _tlab);
183 assert(gch->is_in_reserved_or_null(_res), "result not in heap");
184
185 if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
186 set_gc_locked();
187 }
188 }
189
190 void VM_GenCollectFull::doit() {
191 SvcGCMarker sgcm(SvcGCMarker::FULL);
192
193 GenCollectedHeap* gch = GenCollectedHeap::heap();
|
128 // Release the Heap_lock first.
129 SharedHeap* sh = SharedHeap::heap();
130 if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = false;
131 Heap_lock->unlock();
132 release_and_notify_pending_list_lock();
133 }
134
135 bool VM_GC_HeapInspection::doit_prologue() {
136 if (Universe::heap()->supports_heap_inspection()) {
137 return VM_GC_Operation::doit_prologue();
138 } else {
139 return false;
140 }
141 }
142
// Never skip: doit_prologue() already refused heaps that do not support
// heap inspection, so the assert merely re-checks that invariant.
bool VM_GC_HeapInspection::skip_operation() const {
  assert(Universe::heap()->supports_heap_inspection(), "huh?");
  return false;
}
147
148 bool VM_GC_HeapInspection::collect() {
149 if (GC_locker::is_active()) {
150 return false;
151 }
152 Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
153 return true;
154 }
155
156 void VM_GC_HeapInspection::doit() {
157 HandleMark hm;
158 Universe::heap()->ensure_parsability(false); // must happen, even if collection does
159 // not happen (e.g. due to GC_locker)
160 // or _full_gc being false
161 if (_full_gc) {
162 if (!collect()) {
163 // The collection attempt was skipped because the gc locker is held.
164 // The following dump may then be a tad misleading to someone expecting
165 // only live objects to show up in the dump (see CR 6944195). Just issue
166 // a suitable warning in that case and do not attempt to do a collection.
167 // The latter is a subtle point, because even a failed attempt
168 // to GC will, in fact, induce one in the future, which we
169 // probably want to avoid in this case because the GC that we may
170 // be about to attempt holds value for us only
171 // if it happens now and not if it happens in the eventual
172 // future.
173 warning("GC locker is held; pre-dump GC was skipped");
174 }
175 }
176 HeapInspection inspect(_csv_format, _print_help, _print_class_stats,
177 _columns);
178 inspect.heap_inspection(_out);
179 }
180
181
182 void VM_GenCollectForAllocation::doit() {
183 SvcGCMarker sgcm(SvcGCMarker::MINOR);
184
185 GenCollectedHeap* gch = GenCollectedHeap::heap();
186 GCCauseSetter gccs(gch, _gc_cause);
187 _res = gch->satisfy_failed_allocation(_size, _tlab);
188 assert(gch->is_in_reserved_or_null(_res), "result not in heap");
189
190 if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
191 set_gc_locked();
192 }
193 }
194
195 void VM_GenCollectFull::doit() {
196 SvcGCMarker sgcm(SvcGCMarker::FULL);
197
198 GenCollectedHeap* gch = GenCollectedHeap::heap();
|