/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/preserveException.hpp"

// Implementation of InterfaceSupport

#ifdef ASSERT
VMEntryWrapper::VMEntryWrapper() {
  if (VerifyLastFrame) {
    InterfaceSupport::verify_last_frame();
  }
}

VMEntryWrapper::~VMEntryWrapper() {
  InterfaceSupport::check_gc_alot();
  if (WalkStackALot) {
    InterfaceSupport::walk_stack();
  }
#ifdef COMPILER2
  // This option is not used by Compiler 1
  if (StressDerivedPointers) {
    InterfaceSupport::stress_derived_pointers();
  }
#endif
  if (DeoptimizeALot || DeoptimizeRandom) {
    InterfaceSupport::deoptimizeAll();
  }
  if (ZombieALot) {
    InterfaceSupport::zombieAll();
  }
  if (DeoptimizeObjectsALot == 1) {
    InterfaceSupport::deoptimizeAllObjects();
  }
  // do verification AFTER potential deoptimization
  if (VerifyStack) {
    InterfaceSupport::verify_stack();
  }
}

VMNativeEntryWrapper::VMNativeEntryWrapper() {
  if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
}

VMNativeEntryWrapper::~VMNativeEntryWrapper() {
  if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
}

long InterfaceSupport::_number_of_calls       = 0;
long InterfaceSupport::_scavenge_alot_counter = 1;
long InterfaceSupport::_fullgc_alot_counter   = 1;
long InterfaceSupport::_fullgc_alot_invocation = 0;

Histogram* RuntimeHistogram;

RuntimeHistogramElement::RuntimeHistogramElement(const char* elementName) {
  static volatile int RuntimeHistogram_lock = 0;
  _name = elementName;
  uintx count = 0;

  while (Atomic::cmpxchg(1, &RuntimeHistogram_lock, 0) != 0) {
    while (OrderAccess::load_acquire(&RuntimeHistogram_lock) != 0) {
      count += 1;
      if ((WarnOnStalledSpinLock > 0) &&
          (count % WarnOnStalledSpinLock == 0)) {
        warning("RuntimeHistogram_lock seems to be stalled");
      }
    }
  }

  if (RuntimeHistogram == NULL) {
    RuntimeHistogram = new Histogram("VM Runtime Call Counts", 200);
  }

  RuntimeHistogram->add_element(this);
  Atomic::dec(&RuntimeHistogram_lock);
}
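
// Usage note (a sketch, not part of this file): a RuntimeHistogramElement is
// meant to be created once per runtime entry point, typically as a
// function-local static, and bumped on every call, along the lines of:
//
//   static RuntimeHistogramElement* e = new RuntimeHistogramElement("name");
//   if (e != NULL) e->increment_count();
//
// increment_count() comes from the HistogramElement base class (see
// utilities/histogram.hpp). The hand-rolled test-and-test-and-set spin lock
// in the constructor above only serializes the one-time registration: the
// cmpxchg attempts to take the lock, and the inner load_acquire loop spins
// on a plain read so contending threads do not keep issuing atomic writes
// to the contended cache line.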

void InterfaceSupport::gc_alot() {
  Thread *thread = Thread::current();
  if (!thread->is_Java_thread()) return; // Avoid concurrent calls
  // Check for new, not quite initialized thread. A thread in new mode cannot initiate a GC.
  JavaThread *current_thread = (JavaThread *)thread;
  if (current_thread->active_handles() == NULL) return;

  // Short-circuit any possible re-entrant gc-a-lot attempt
  if (thread->skip_gcalot()) return;

  if (Threads::is_vm_complete()) {

    if (++_fullgc_alot_invocation < FullGCALotStart) {
      return;
    }

    // Use this line if you want to block at a specific point,
    // e.g. one number_of_calls/scavenge/gc before you run into problems
    if (FullGCALot) _fullgc_alot_counter--;

    // Check if we should force a full gc
    if (_fullgc_alot_counter == 0) {
      // Release dummy so objects are forced to move
      if (!Universe::release_fullgc_alot_dummy()) {
        warning("FullGCALot: Unable to release more dummies at bottom of heap");
      }
      HandleMark hm(thread);
      Universe::heap()->collect(GCCause::_full_gc_alot);
      unsigned int invocations = Universe::heap()->total_full_collections();
      // Compute new interval
      if (FullGCALotInterval > 1) {
        _fullgc_alot_counter = 1+(long)((double)FullGCALotInterval*os::random()/(max_jint+1.0));
        log_trace(gc)("Full gc no: %u\tInterval: %ld", invocations, _fullgc_alot_counter);
      } else {
        _fullgc_alot_counter = 1;
      }
      // Print progress message
      if (invocations % 100 == 0) {
        log_trace(gc)("Full gc no: %u", invocations);
      }
    } else {
      if (ScavengeALot) _scavenge_alot_counter--;
      // Check if we should force a scavenge
      if (_scavenge_alot_counter == 0) {
        HandleMark hm(thread);
        Universe::heap()->collect(GCCause::_scavenge_alot);
        unsigned int invocations = Universe::heap()->total_collections() - Universe::heap()->total_full_collections();
        // Compute new interval
        if (ScavengeALotInterval > 1) {
          _scavenge_alot_counter = 1+(long)((double)ScavengeALotInterval*os::random()/(max_jint+1.0));
          log_trace(gc)("Scavenge no: %u\tInterval: %ld", invocations, _scavenge_alot_counter);
        } else {
          _scavenge_alot_counter = 1;
        }
        // Print progress message
        if (invocations % 1000 == 0) {
          log_trace(gc)("Scavenge no: %u", invocations);
        }
      }
    }
  }
}
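
// A note on the interval computation above, spelled out for clarity:
// os::random() returns a jint in [0, max_jint], so
// os::random()/(max_jint+1.0) is uniform in [0, 1), interval*that falls in
// [0, interval), and 1 + (long)(...) lands in [1, interval]. With
// ScavengeALotInterval=16, for example, the next forced scavenge is
// scheduled somewhere between 1 and 16 eligible checks from now.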

vframe* vframe_array[50];
int walk_stack_counter = 0;

void InterfaceSupport::walk_stack_from(vframe* start_vf) {
  // walk
  int i = 0;
  for (vframe* f = start_vf; f; f = f->sender()) {
    if (i < 50) vframe_array[i++] = f;
  }
}

void InterfaceSupport::walk_stack() {
  JavaThread* thread = JavaThread::current();
  walk_stack_counter++;
  if (!thread->has_last_Java_frame()) return;
  ResourceMark rm(thread);
  RegisterMap reg_map(thread);
  walk_stack_from(thread->last_java_vframe(&reg_map));
}

// invocation counter for InterfaceSupport::deoptimizeAll/zombieAll functions
int deoptimizeAllCounter = 0;
int zombieAllCounter = 0;
int deoptimizeAllObjectsCounter = 0;

void InterfaceSupport::zombieAll() {
  // This method is called by all threads when a thread makes a
  // transition to VM state (for example, runtime calls).
  // Divide the number of calls by the number of threads to keep the
  // frequency of ZombieAll events independent of the number of threads.
  int value = zombieAllCounter / Threads::number_of_threads();
  if (is_init_completed() && value > ZombieALotInterval) {
    zombieAllCounter = 0;
    VM_ZombieAll op;
    VMThread::execute(&op);
  }
  zombieAllCounter++;
}

void InterfaceSupport::deoptimizeAll() {
  // This method is called by all threads when a thread makes a
  // transition to VM state (for example, runtime calls).
  // Divide the number of calls by the number of threads to keep the
  // frequency of DeoptimizeAll events independent of the number of threads.
  int value = deoptimizeAllCounter / Threads::number_of_threads();
  if (is_init_completed()) {
    if (DeoptimizeALot && value > DeoptimizeALotInterval) {
      deoptimizeAllCounter = 0;
      VM_DeoptimizeAll op;
      VMThread::execute(&op);
    } else if (DeoptimizeRandom && (value & 0x1F) == (os::random() & 0x1F)) {
      VM_DeoptimizeAll op;
      VMThread::execute(&op);
    }
  }
  deoptimizeAllCounter++;
}

void InterfaceSupport::deoptimizeAllObjects() {
  // This method is called by all threads when a thread makes a
  // transition to VM state (for example, runtime calls).
  // Divide the number of calls by the number of threads to keep the
  // frequency of DeoptimizeObjectsALot events independent of the number of threads.
  int value = deoptimizeAllObjectsCounter / Threads::number_of_threads();
  if (is_init_completed() && value > DeoptimizeObjectsALotInterval) {
    // Revert optimizations based on escape analysis for all compiled frames of all Java threads as
    // if objects local to a frame or a thread were escaping.
    deoptimizeAllObjectsCounter = 0;
    JavaThread* ct = JavaThread::current();
    JVMTIEscapeBarrier eb(ct, true);
    eb.deoptimize_objects_all_threads();
  }
  deoptimizeAllObjectsCounter++;
}
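
// Throttling arithmetic for the three hooks above (a reading of the code,
// not new behavior): each global counter counts VM-state transitions from
// all threads, so counter / Threads::number_of_threads() approximates
// transitions per thread. With N threads and an interval I, a VM operation
// is therefore requested roughly once every N * I transitions process-wide,
// regardless of N. The counters are plain ints updated without atomics;
// for an ASSERT-only stress facility, an occasionally lost update merely
// perturbs the schedule.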

void InterfaceSupport::stress_derived_pointers() {
#ifdef COMPILER2
  JavaThread *thread = JavaThread::current();
  if (!is_init_completed()) return;
  ResourceMark rm(thread);
  bool found = false;
  for (StackFrameStream sfs(thread); !sfs.is_done() && !found; sfs.next()) {
    CodeBlob* cb = sfs.current()->cb();
    if (cb != NULL && cb->oop_maps()) {
      // Find oopmap for current method
      const ImmutableOopMap* map = cb->oop_map_for_return_address(sfs.current()->pc());
      assert(map != NULL, "no oopmap found for pc");
      found = map->has_derived_pointer();
    }
  }
  if (found) {
    // $$$ Not sure what to do here.
    /*
    Scavenge::invoke(0);
    */
  }
#endif
}

void InterfaceSupport::verify_stack() {
  JavaThread* thread = JavaThread::current();
  ResourceMark rm(thread);
  // disabled because it throws warnings that oop maps should only be accessed
  // in VM thread or during debugging

  if (!thread->has_pending_exception()) {
    // verification does not work if there are pending exceptions
    StackFrameStream sfs(thread);
    CodeBlob* cb = sfs.current()->cb();
    // If an exception is being thrown we might not have a runtime stub on
    // top of the stack; callee-saved registers would then not be set up
    // correctly, so we cannot verify the stack.
    if (cb != NULL && !(cb->is_runtime_stub() || cb->is_uncommon_trap_stub())) return;

    for (; !sfs.is_done(); sfs.next()) {
      sfs.current()->verify(sfs.register_map());
    }
  }
}

void InterfaceSupport::verify_last_frame() {
  JavaThread* thread = JavaThread::current();
  ResourceMark rm(thread);
  RegisterMap reg_map(thread);
  frame fr = thread->last_frame();
  fr.verify(&reg_map);
}

#endif // ASSERT

void InterfaceSupport_init() {
#ifdef ASSERT
  if (ScavengeALot || FullGCALot) {
    srand(ScavengeALotInterval * FullGCALotInterval);
  }
#endif
}
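
// Usage note (a typical invocation, assumed rather than taken from this
// file): the hooks above are compiled into debug builds only (#ifdef ASSERT)
// and are driven by develop flags, e.g.
//
//   -XX:+ScavengeALot -XX:ScavengeALotInterval=16
//   -XX:+FullGCALot -XX:FullGCALotInterval=32
//   -XX:+DeoptimizeALot
//
// Develop flags can only be changed from their defaults on a debug VM, so a
// product build does not expose these stress modes.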