/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/preserveException.hpp"

// Implementation of InterfaceSupport

#ifdef ASSERT
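// Debug-only wrappers placed around VM entry points. On construction they
// can verify the last Java frame; on destruction they run the various
// *ALot stress hooks (GC, stack walking, deoptimization, nmethod
// zombification) and then verify the stack.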
VMEntryWrapper::VMEntryWrapper() {
  if (VerifyLastFrame) {
    InterfaceSupport::verify_last_frame();
  }
}

VMEntryWrapper::~VMEntryWrapper() {
  InterfaceSupport::check_gc_alot();
  if (WalkStackALot) {
    InterfaceSupport::walk_stack();
  }
  if (DeoptimizeALot || DeoptimizeRandom) {
    InterfaceSupport::deoptimizeAll();
  }
  if (ZombieALot) {
    InterfaceSupport::zombieAll();
  }
  // do verification AFTER potential deoptimization
  if (VerifyStack) {
    InterfaceSupport::verify_stack();
  }
}

VMNativeEntryWrapper::VMNativeEntryWrapper() {
  if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
}

VMNativeEntryWrapper::~VMNativeEntryWrapper() {
  if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
}

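// Stress-test bookkeeping: a global call count plus countdowns until the
// next forced scavenge and the next forced full collection.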
long InterfaceSupport::_number_of_calls       = 0;
long InterfaceSupport::_scavenge_alot_counter = 1;
long InterfaceSupport::_fullgc_alot_counter   = 1;
long InterfaceSupport::_fullgc_alot_invocation = 0;

Histogram* RuntimeHistogram;

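// Registers one element in the global RuntimeHistogram, creating the
// histogram lazily on first use. Registration is serialized by a simple
// spin lock; WarnOnStalledSpinLock bounds how often a stalled spin emits
// a warning.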
RuntimeHistogramElement::RuntimeHistogramElement(const char* elementName) {
  static volatile int RuntimeHistogram_lock = 0;
  _name = elementName;
  uintx count = 0;

  while (Atomic::cmpxchg(&RuntimeHistogram_lock, 0, 1) != 0) {
    while (Atomic::load_acquire(&RuntimeHistogram_lock) != 0) {
      count += 1;
      if ((WarnOnStalledSpinLock > 0) &&
          (count % WarnOnStalledSpinLock == 0)) {
        warning("RuntimeHistogram_lock seems to be stalled");
      }
    }
  }

  if (RuntimeHistogram == NULL) {
    RuntimeHistogram = new Histogram("VM Runtime Call Counts", 200);
  }

  RuntimeHistogram->add_element(this);
  Atomic::dec(&RuntimeHistogram_lock);
}

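// Forces scavenges and full collections at (pseudo-random) intervals
// controlled by ScavengeALot/FullGCALot to stress object movement. Only a
// fully initialized JavaThread may trigger a GC here, and only once the
// VM is completely up.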
void InterfaceSupport::gc_alot() {
  Thread *thread = Thread::current();
  if (!thread->is_Java_thread()) return; // Avoid concurrent calls
  // Check for a new, not quite initialized thread. A thread in new mode cannot initiate a GC.
  JavaThread *current_thread = (JavaThread *)thread;
  if (current_thread->active_handles() == NULL) return;

  // Short-circuit any possible re-entrant gc-a-lot attempt
  if (thread->skip_gcalot()) return;

  if (Threads::is_vm_complete()) {

    if (++_fullgc_alot_invocation < FullGCALotStart) {
      return;
    }

    // Use this line if you want to stop at a specific point, e.g. one
    // number_of_calls/scavenge/gc before the failure you are chasing
    if (FullGCALot) _fullgc_alot_counter--;

    // Check if we should force a full gc
    if (_fullgc_alot_counter == 0) {
      // Release dummy so objects are forced to move
      if (!Universe::release_fullgc_alot_dummy()) {
        warning("FullGCALot: Unable to release more dummies at bottom of heap");
      }
      HandleMark hm(thread);
      Universe::heap()->collect(GCCause::_full_gc_alot);
      unsigned int invocations = Universe::heap()->total_full_collections();
      // Compute new interval
      if (FullGCALotInterval > 1) {
        _fullgc_alot_counter = 1+(long)((double)FullGCALotInterval*os::random()/(max_jint+1.0));
        log_trace(gc)("Full gc no: %u\tInterval: %ld", invocations, _fullgc_alot_counter);
      } else {
        _fullgc_alot_counter = 1;
      }
      // Print progress message
      if (invocations % 100 == 0) {
        log_trace(gc)("Full gc no: %u", invocations);
      }
    } else {
      if (ScavengeALot) _scavenge_alot_counter--;
      // Check if we should force a scavenge
      if (_scavenge_alot_counter == 0) {
        HandleMark hm(thread);
        Universe::heap()->collect(GCCause::_scavenge_alot);
        unsigned int invocations = Universe::heap()->total_collections() - Universe::heap()->total_full_collections();
        // Compute new interval
        if (ScavengeALotInterval > 1) {
          _scavenge_alot_counter = 1+(long)((double)ScavengeALotInterval*os::random()/(max_jint+1.0));
          log_trace(gc)("Scavenge no: %u\tInterval: %ld", invocations, _scavenge_alot_counter);
        } else {
          _scavenge_alot_counter = 1;
        }
        // Print progress message
        if (invocations % 1000 == 0) {
          log_trace(gc)("Scavenge no: %u", invocations);
        }
      }
    }
  }
}

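// Scratch state for WalkStackALot: the most recently captured vframes and
// a count of how many stack walks have been performed.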
vframe* vframe_array[50];
int walk_stack_counter = 0;

void InterfaceSupport::walk_stack_from(vframe* start_vf) {
  // Walk toward the caller, recording up to the first 50 vframes
  int i = 0;
  for (vframe* f = start_vf; f; f = f->sender()) {
    if (i < 50) vframe_array[i++] = f;
  }
}


void InterfaceSupport::walk_stack() {
  JavaThread* thread = JavaThread::current();
  walk_stack_counter++;
  if (!thread->has_last_Java_frame()) return;
  ResourceMark rm(thread);
  RegisterMap reg_map(thread);
  walk_stack_from(thread->last_java_vframe(&reg_map));
}

// invocation counters for InterfaceSupport::deoptimizeAll/zombieAll functions
int deoptimizeAllCounter = 0;
int zombieAllCounter = 0;

void InterfaceSupport::zombieAll() {
  // This method is called by all threads when a thread makes a
  // transition to VM state (for example, runtime calls).
  // Divide the number of calls by the number of threads so that the
  // frequency of ZombieAll events does not depend on the number of threads.
  int value = zombieAllCounter / Threads::number_of_threads();
  if (is_init_completed() && value > ZombieALotInterval) {
    zombieAllCounter = 0;
    VM_ZombieAll op;
    VMThread::execute(&op);
  }
  zombieAllCounter++;
}

void InterfaceSupport::deoptimizeAll() {
  // This method is called by all threads when a thread makes a
  // transition to VM state (for example, runtime calls).
  // Divide the number of calls by the number of threads so that the
  // frequency of DeoptimizeAll events does not depend on the number of threads.
  int value = deoptimizeAllCounter / Threads::number_of_threads();
  if (is_init_completed()) {
    if (DeoptimizeALot && value > DeoptimizeALotInterval) {
      deoptimizeAllCounter = 0;
      VM_DeoptimizeAll op;
      VMThread::execute(&op);
    } else if (DeoptimizeRandom && (value & 0x1F) == (os::random() & 0x1F)) {
      VM_DeoptimizeAll op;
      VMThread::execute(&op);
    }
  }
  deoptimizeAllCounter++;
}


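// Walks and verifies every frame on the current thread's stack. Skipped
// when an exception is pending, and when the top frame is not a runtime
// or uncommon-trap stub (the callee-saved registers would not be set up).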
void InterfaceSupport::verify_stack() {
  JavaThread* thread = JavaThread::current();
  ResourceMark rm(thread);
  // disabled because it triggers warnings that oop maps should only be
  // accessed in the VM thread or during debugging

  if (!thread->has_pending_exception()) {
    // verification does not work if there are pending exceptions
    StackFrameStream sfs(thread);
    CodeBlob* cb = sfs.current()->cb();
    // In case of exceptions we might not have a runtime_stub on top of
    // the stack; the callee-saved registers would then not be set up
    // correctly, so we cannot verify the stack
    if (cb != NULL && !(cb->is_runtime_stub() || cb->is_uncommon_trap_stub())) return;

    for (; !sfs.is_done(); sfs.next()) {
      sfs.current()->verify(sfs.register_map());
    }
  }
}


void InterfaceSupport::verify_last_frame() {
  JavaThread* thread = JavaThread::current();
  ResourceMark rm(thread);
  RegisterMap reg_map(thread);
  frame fr = thread->last_frame();
  fr.verify(&reg_map);
}


#endif // ASSERT

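// Seeds the C library random number generator when the GC stress options
// are enabled; the fixed seed keeps runs with the same interval settings
// deterministic.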
void InterfaceSupport_init() {
#ifdef ASSERT
  if (ScavengeALot || FullGCALot) {
    srand(ScavengeALotInterval * FullGCALotInterval);
  }
#endif
}