// NOTE(review): This chunk is not compilable C++ source — it is a two-column
// (side-by-side) diff rendering that has been collapsed onto three physical
// lines. Each column carries its own embedded original line numbers (127-194)
// and the two columns are separated by a '|' character. Do not edit it as code.
//
// Left column (old): VM_GC_HeapInspection::doit() called ensure_parsability()
// unconditionally, then — when _full_gc — checked GC_locker::is_active()
// inline, either warning "GC locker is held; pre-dump GC was skipped" or
// calling collect_as_vm_thread(GCCause::_heap_inspection), before running
// HeapInspection::heap_inspection(_out, _need_prologue).
//
// Right column (new): the GC attempt is factored into a new helper,
// VM_GC_HeapInspection::collect(), which calls ensure_parsability(false),
// returns false when GC_locker::is_active() (collection skipped), and
// otherwise calls collect_as_vm_thread(GCCause::_heap_inspection) and
// returns true. doit() now only calls collect() on the _full_gc path and
// issues the same warning when collect() returns false.
//
// NOTE(review): in the new column, ensure_parsability() runs only when
// _full_gc is set (inside collect()), whereas the old column ran it before
// every heap dump, "even if collection does not happen" per its own comment.
// Presumably heap_inspection() still needs a parsable heap for a non-full-gc
// dump — confirm against the complete changeset that this move is intended.
//
// The surrounding functions (doit_prologue, skip_operation,
// VM_GenCollectForAllocation::doit) appear unchanged in both columns. The
// chunk is truncated at both ends: lines 127-132 are the tail of an epilogue
// that releases Heap_lock and the pending-list lock, and the final line is
// the opening of VM_GenCollectFull::doit() — both incomplete in this view.
127 // Release the Heap_lock first. 128 SharedHeap* sh = SharedHeap::heap(); 129 if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = false; 130 Heap_lock->unlock(); 131 release_and_notify_pending_list_lock(); 132 } 133 134 bool VM_GC_HeapInspection::doit_prologue() { 135 if (Universe::heap()->supports_heap_inspection()) { 136 return VM_GC_Operation::doit_prologue(); 137 } else { 138 return false; 139 } 140 } 141 142 bool VM_GC_HeapInspection::skip_operation() const { 143 assert(Universe::heap()->supports_heap_inspection(), "huh?"); 144 return false; 145 } 146 147 void VM_GC_HeapInspection::doit() { 148 HandleMark hm; 149 CollectedHeap* ch = Universe::heap(); 150 ch->ensure_parsability(false); // must happen, even if collection does 151 // not happen (e.g. due to GC_locker) 152 if (_full_gc) { 153 // The collection attempt below would be skipped anyway if 154 // the gc locker is held. The following dump may then be a tad 155 // misleading to someone expecting only live objects to show 156 // up in the dump (see CR 6944195). Just issue a suitable warning 157 // in that case and do not attempt to do a collection. 158 // The latter is a subtle point, because even a failed attempt 159 // to GC will, in fact, induce one in the future, which we 160 // probably want to avoid in this case because the GC that we may 161 // be about to attempt holds value for us only 162 // if it happens now and not if it happens in the eventual 163 // future. 
164 if (GC_locker::is_active()) { 165 warning("GC locker is held; pre-dump GC was skipped"); 166 } else { 167 ch->collect_as_vm_thread(GCCause::_heap_inspection); 168 } 169 } 170 HeapInspection::heap_inspection(_out, _need_prologue /* need_prologue */); 171 } 172 173 174 void VM_GenCollectForAllocation::doit() { 175 SvcGCMarker sgcm(SvcGCMarker::MINOR); 176 177 GenCollectedHeap* gch = GenCollectedHeap::heap(); 178 GCCauseSetter gccs(gch, _gc_cause); 179 _res = gch->satisfy_failed_allocation(_size, _tlab); 180 assert(gch->is_in_reserved_or_null(_res), "result not in heap"); 181 182 if (_res == NULL && GC_locker::is_active_and_needs_gc()) { 183 set_gc_locked(); 184 } 185 } 186 187 void VM_GenCollectFull::doit() { | 127 // Release the Heap_lock first. 128 SharedHeap* sh = SharedHeap::heap(); 129 if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = false; 130 Heap_lock->unlock(); 131 release_and_notify_pending_list_lock(); 132 } 133 134 bool VM_GC_HeapInspection::doit_prologue() { 135 if (Universe::heap()->supports_heap_inspection()) { 136 return VM_GC_Operation::doit_prologue(); 137 } else { 138 return false; 139 } 140 } 141 142 bool VM_GC_HeapInspection::skip_operation() const { 143 assert(Universe::heap()->supports_heap_inspection(), "huh?"); 144 return false; 145 } 146 147 bool VM_GC_HeapInspection::collect() { 148 CollectedHeap* ch = Universe::heap(); 149 ch->ensure_parsability(false); // must happen, even if collection does 150 // not happen (e.g. due to GC_locker) 151 152 // The collection attempt below would be skipped anyway if 153 // the gc locker is held. The following dump may then be a tad 154 // misleading to someone expecting only live objects to show 155 // up in the dump (see CR 6944195). Just issue a suitable warning 156 // in that case and do not attempt to do a collection. 
157 // The latter is a subtle point, because even a failed attempt 158 // to GC will, in fact, induce one in the future, which we 159 // probably want to avoid in this case because the GC that we may 160 // be about to attempt holds value for us only 161 // if it happens now and not if it happens in the eventual 162 // future. 163 if (GC_locker::is_active()) { 164 return false; 165 } 166 ch->collect_as_vm_thread(GCCause::_heap_inspection); 167 return true; 168 } 169 170 void VM_GC_HeapInspection::doit() { 171 HandleMark hm; 172 if (_full_gc) { 173 if (!collect()) { 174 warning("GC locker is held; pre-dump GC was skipped"); 175 } 176 } 177 HeapInspection::heap_inspection(_out, _need_prologue /* need_prologue */); 178 } 179 180 181 void VM_GenCollectForAllocation::doit() { 182 SvcGCMarker sgcm(SvcGCMarker::MINOR); 183 184 GenCollectedHeap* gch = GenCollectedHeap::heap(); 185 GCCauseSetter gccs(gch, _gc_cause); 186 _res = gch->satisfy_failed_allocation(_size, _tlab); 187 assert(gch->is_in_reserved_or_null(_res), "result not in heap"); 188 189 if (_res == NULL && GC_locker::is_active_and_needs_gc()) { 190 set_gc_locked(); 191 } 192 } 193 194 void VM_GenCollectFull::doit() { |