1 #ifdef USE_PRAGMA_IDENT_HDR 2 #pragma ident "@(#)vmGCOperations.cpp 1.21 07/05/29 09:44:12 JVM" 3 #endif 4 /* 5 * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. 6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 7 * 8 * This code is free software; you can redistribute it and/or modify it 9 * under the terms of the GNU General Public License version 2 only, as 10 * published by the Free Software Foundation. 11 * 12 * This code is distributed in the hope that it will be useful, but WITHOUT 13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 15 * version 2 for more details (a copy is included in the LICENSE file that 16 * accompanied this code). 17 * 18 * You should have received a copy of the GNU General Public License version 19 * 2 along with this work; if not, write to the Free Software Foundation, 20 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 21 * 22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, 23 * CA 95054 USA or visit www.sun.com if you need additional information or 24 * have any questions. 25 * 26 */ 27 # include "incls/_precompiled.incl" 28 # include "incls/_vmGCOperations.cpp.incl" 29 30 HS_DTRACE_PROBE_DECL1(hotspot, gc__begin, bool); 31 HS_DTRACE_PROBE_DECL(hotspot, gc__end); 32 33 // The same dtrace probe can't be inserted in two different files, so we 34 // have to call it here, so it's only in one file. Can't create new probes 35 // for the other file anymore. The dtrace probes have to remain stable. 
// Fire the dtrace gc-begin probe declared above. Called at the start of a
// GC VM operation's doit(); `full` distinguishes full collections.
void VM_GC_Operation::notify_gc_begin(bool full) {
  HS_DTRACE_PROBE1(hotspot, gc__begin, full);
}

// Fire the dtrace gc-end probe. Paired with notify_gc_begin(); called at the
// end of each doit() below.
void VM_GC_Operation::notify_gc_end() {
  HS_DTRACE_PROBE(hotspot, gc__end);
}

// Take the reference-pending-list lock on behalf of this operation, storing
// it in _pending_list_basic_lock for the matching release below.
void VM_GC_Operation::acquire_pending_list_lock() {
  // we may enter this with pending exception set
  instanceRefKlass::acquire_pending_list_lock(&_pending_list_basic_lock);
}


// Release the pending-list lock taken in acquire_pending_list_lock() and
// notify any waiters (e.g. the reference handler thread).
void VM_GC_Operation::release_and_notify_pending_list_lock() {

  instanceRefKlass::release_and_notify_pending_list_lock(&_pending_list_basic_lock);
}

// Allocations may fail in several threads at about the same time,
// resulting in multiple gc requests. We only want to do one of them.
// In case a GC locker is active and the need for a GC is already signalled,
// we want to skip this GC attempt altogether, without doing a futile
// safepoint operation.
bool VM_GC_Operation::skip_operation() const {
  // Another collection has happened since this operation was created.
  bool skip = (_gc_count_before != Universe::heap()->total_collections());
  if (_full && skip) {
    // For a full-GC request, only a completed *full* collection satisfies it.
    skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
  }
  if (!skip && GC_locker::is_active_and_needs_gc()) {
    // GC locker is holding off GC and has already signalled the need for one:
    // skip unless the heap could still be expanded instead.
    skip = Universe::heap()->is_maximal_no_gc();
    assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
           "GC_locker cannot be active when initiating GC");
  }
  return skip;
}

// Runs in the requesting Java thread before the safepoint. Acquires the
// pending-list lock and the Heap_lock (in that order), then bails out early
// if another thread's collection already satisfied this request.
// Returns _prologue_succeeded; a false return means doit() will not run.
bool VM_GC_Operation::doit_prologue() {
  assert(Thread::current()->is_Java_thread(), "just checking");

  acquire_pending_list_lock();
  // If the GC count has changed someone beat us to the collection
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();

  // Check invocations
  if (skip_operation()) {
    // skip collection
    Heap_lock->unlock();
    release_and_notify_pending_list_lock();
    _prologue_succeeded = false;
  } else {
    _prologue_succeeded = true;
    SharedHeap* sh = SharedHeap::heap();
    // NOTE(review): flag presumably tells the heap that the requesting thread
    // (not the VM thread) holds Heap_lock during the GC — confirm against
    // SharedHeap; cleared again in doit_epilogue().
    if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = true;
  }
  return _prologue_succeeded;
}


// Runs in the requesting Java thread after the safepoint operation.
// Releases the locks taken in doit_prologue() in reverse order:
// Heap_lock first, then the pending-list lock.
void VM_GC_Operation::doit_epilogue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  // Release the Heap_lock first.
  SharedHeap* sh = SharedHeap::heap();
  if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = false;
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();
}

// Heap inspection only proceeds on heaps that support it; otherwise the
// whole operation is abandoned before any locks are taken.
bool VM_GC_HeapInspection::doit_prologue() {
  if (Universe::heap()->supports_heap_inspection()) {
    return VM_GC_Operation::doit_prologue();
  } else {
    return false;
  }
}

// A heap inspection request is never coalesced away by a preceding GC —
// the inspection output is wanted regardless of collection counts.
bool VM_GC_HeapInspection::skip_operation() const {
  assert(Universe::heap()->supports_heap_inspection(), "huh?");
  return false;
}

// Perform the inspection at the safepoint: optionally force a full GC first
// (so only live objects are counted), otherwise just make the heap walkable,
// then write the histogram to _out.
void VM_GC_HeapInspection::doit() {
  HandleMark hm;
  CollectedHeap* ch = Universe::heap();
  if (_full_gc) {
    ch->collect_as_vm_thread(GCCause::_heap_inspection);
  } else {
    // make the heap parsable (no need to retire TLABs)
    ch->ensure_parsability(false);
  }
  HeapInspection::heap_inspection(_out);
}


// Collect in response to a failed allocation of _size words (tlab or not).
// The resulting block, if any, is left in _res for the requesting thread;
// a NULL result while GC_locker needs a GC marks the operation gc-locked
// so the caller can retry once the locker-induced GC has run.
void VM_GenCollectForAllocation::doit() {
  JvmtiGCForAllocationMarker jgcm;
  notify_gc_begin(false);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);   // records _gc_cause for the duration
  _res = gch->satisfy_failed_allocation(_size, _tlab);
  assert(gch->is_in_reserved_or_null(_res), "result not in heap");

  if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
  notify_gc_end();
}

// Explicitly-requested full collection of generations up to _max_level.
void VM_GenCollectFull::doit() {
  JvmtiGCFullMarker jgcm;
  notify_gc_begin(true);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
  notify_gc_end();
}

// A failed permanent-generation allocation triggers a full collection
// (dispatched on the concrete heap kind), after which the perm-gen
// allocation of _size words is retried; result goes to _res.
void VM_GenCollectForPermanentAllocation::doit() {
  JvmtiGCForAllocationMarker jgcm;
  notify_gc_begin(true);
  SharedHeap* heap = (SharedHeap*)Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);
  switch (heap->kind()) {
    case (CollectedHeap::GenCollectedHeap): {
      // Generational heap: full collection over all generations.
      GenCollectedHeap* gch = (GenCollectedHeap*)heap;
      gch->do_full_collection(gch->must_clear_all_soft_refs(),
                              gch->n_gens() - 1);
      break;
    }
#ifndef SERIALGC
    case (CollectedHeap::G1CollectedHeap): {
      // G1: soft refs are cleared only for a last-ditch collection.
      G1CollectedHeap* g1h = (G1CollectedHeap*)heap;
      g1h->do_full_collection(_gc_cause == GCCause::_last_ditch_collection);
      break;
    }
#endif // SERIALGC
    default:
      ShouldNotReachHere();
  }
  // Retry the perm-gen allocation now that the full collection has run.
  _res = heap->perm_gen()->allocate(_size, false);
  assert(heap->is_in_reserved_or_null(_res), "result not in heap");
  if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
    // Allocation still failed under an active GC locker: flag the operation
    // so the caller retries after the locker-induced GC.
    set_gc_locked();
  }
  notify_gc_end();
}