/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/vframe.hpp"
#include "utilities/preserveException.hpp"

// Implementation of InterfaceSupport

#ifdef ASSERT

long InterfaceSupport::_number_of_calls        = 0;
long InterfaceSupport::_scavenge_alot_counter  = 1;
long InterfaceSupport::_fullgc_alot_counter    = 1;
long InterfaceSupport::_fullgc_alot_invocation = 0;

Histogram* RuntimeHistogram;

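// Registers this element in the global RuntimeHistogram, creating the
// histogram lazily on first use. A simple spin lock serializes concurrent
// registrations; WarnOnStalledSpinLock controls how often a stall warning
// is emitted while waiting for the lock.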
RuntimeHistogramElement::RuntimeHistogramElement(const char* elementName) {
  static volatile jint RuntimeHistogram_lock = 0;
  _name = elementName;
  uintx count = 0;

  while (Atomic::cmpxchg(1, &RuntimeHistogram_lock, 0) != 0) {
    while (OrderAccess::load_acquire(&RuntimeHistogram_lock) != 0) {
      count += 1;
      if ((WarnOnStalledSpinLock > 0) &&
          (count % WarnOnStalledSpinLock == 0)) {
        warning("RuntimeHistogram_lock seems to be stalled");
      }
    }
  }

  if (RuntimeHistogram == NULL) {
    RuntimeHistogram = new Histogram("VM Runtime Call Counts", 200);
  }

  RuntimeHistogram->add_element(this);
  Atomic::dec(&RuntimeHistogram_lock);
}

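// Forces extra collections when ScavengeALot/FullGCALot are enabled: once the
// VM is fully booted (and, for full GCs, FullGCALotStart invocations have
// passed), the counters below trigger a scavenge or full collection each time
// they reach zero, and are then reset to a random value bounded by the
// corresponding *ALotInterval flag.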
void InterfaceSupport::gc_alot() {
  Thread *thread = Thread::current();
  if (!thread->is_Java_thread()) return; // Avoid concurrent calls
  // Check for new, not quite initialized thread. A thread in new mode cannot initiate a GC.
  JavaThread *current_thread = (JavaThread *)thread;
  if (current_thread->active_handles() == NULL) return;

  // Short-circuit any possible re-entrant gc-a-lot attempt
  if (thread->skip_gcalot()) return;

  if (Threads::is_vm_complete()) {

    if (++_fullgc_alot_invocation < FullGCALotStart) {
      return;
    }

    // Use this line if you want to stop at a specific point,
    // e.g. one number_of_calls/scavenge/gc before you run into problems
    if (FullGCALot) _fullgc_alot_counter--;

    // Check if we should force a full gc
    if (_fullgc_alot_counter == 0) {
      // Release dummy so objects are forced to move
      if (!Universe::release_fullgc_alot_dummy()) {
        warning("FullGCALot: Unable to release more dummies at bottom of heap");
      }
      HandleMark hm(thread);
      Universe::heap()->collect(GCCause::_full_gc_alot);
      unsigned int invocations = Universe::heap()->total_full_collections();
      // Compute new interval
      if (FullGCALotInterval > 1) {
        _fullgc_alot_counter = 1+(long)((double)FullGCALotInterval*os::random()/(max_jint+1.0));
        log_trace(gc)("Full gc no: %u\tInterval: %ld", invocations, _fullgc_alot_counter);
      } else {
        _fullgc_alot_counter = 1;
      }
      // Print progress message
      if (invocations % 100 == 0) {
        log_trace(gc)("Full gc no: %u", invocations);
      }
    } else {
      if (ScavengeALot) _scavenge_alot_counter--;
      // Check if we should force a scavenge
      if (_scavenge_alot_counter == 0) {
        HandleMark hm(thread);
        Universe::heap()->collect(GCCause::_scavenge_alot);
        unsigned int invocations = Universe::heap()->total_collections() - Universe::heap()->total_full_collections();
        // Compute new interval
        if (ScavengeALotInterval > 1) {
          _scavenge_alot_counter = 1+(long)((double)ScavengeALotInterval*os::random()/(max_jint+1.0));
          log_trace(gc)("Scavenge no: %u\tInterval: %ld", invocations, _scavenge_alot_counter);
        } else {
          _scavenge_alot_counter = 1;
        }
        // Print progress message
        if (invocations % 1000 == 0) {
          log_trace(gc)("Scavenge no: %u", invocations);
        }
      }
    }
  }
}


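// Debug helpers: walk_stack() records up to 50 vframes of the current thread
// in vframe_array, e.g. for inspection from a native debugger.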
vframe* vframe_array[50];
int walk_stack_counter = 0;

void InterfaceSupport::walk_stack_from(vframe* start_vf) {
  // walk
  int i = 0;
  for (vframe* f = start_vf; f; f = f->sender()) {
    if (i < 50) vframe_array[i++] = f;
  }
}


void InterfaceSupport::walk_stack() {
  JavaThread* thread = JavaThread::current();
  walk_stack_counter++;
  if (!thread->has_last_Java_frame()) return;
  ResourceMark rm(thread);
  RegisterMap reg_map(thread);
  walk_stack_from(thread->last_java_vframe(&reg_map));
}

// invocation counters for InterfaceSupport::deoptimizeAll/zombieAll functions
int deoptimizeAllCounter = 0;
int zombieAllCounter = 0;

void InterfaceSupport::zombieAll() {
  // This method is called by all threads when a thread makes a transition
  // to VM state (for example, runtime calls).
  // Divide the number of calls by the number of threads to avoid a
  // dependence of the ZombieAll event frequency on the number of threads.
  int value = zombieAllCounter / Threads::number_of_threads();
  if (is_init_completed() && value > ZombieALotInterval) {
    zombieAllCounter = 0;
    VM_ZombieAll op;
    VMThread::execute(&op);
  }
  zombieAllCounter++;
}

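// Unlinks symbols in the VM thread via a VM_UnlinkSymbols operation.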
void InterfaceSupport::unlinkSymbols() {
  VM_UnlinkSymbols op;
  VMThread::execute(&op);
}

void InterfaceSupport::deoptimizeAll() {
  // This method is called by all threads when a thread makes a transition
  // to VM state (for example, runtime calls).
  // Divide the number of calls by the number of threads to avoid a
  // dependence of the DeoptimizeAll event frequency on the number of threads.
  int value = deoptimizeAllCounter / Threads::number_of_threads();
  if (is_init_completed()) {
    if (DeoptimizeALot && value > DeoptimizeALotInterval) {
      deoptimizeAllCounter = 0;
      VM_DeoptimizeAll op;
      VMThread::execute(&op);
    } else if (DeoptimizeRandom && (value & 0x1F) == (os::random() & 0x1F)) {
      VM_DeoptimizeAll op;
      VMThread::execute(&op);
    }
  }
  deoptimizeAllCounter++;
}


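// Walks the current thread's frames and checks their oop maps for derived
// pointers (C2 only). The collection that was presumably meant to be forced
// when one is found is commented out, so currently this only exercises the walk.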
void InterfaceSupport::stress_derived_pointers() {
#ifdef COMPILER2
  JavaThread *thread = JavaThread::current();
  if (!is_init_completed()) return;
  ResourceMark rm(thread);
  bool found = false;
  for (StackFrameStream sfs(thread); !sfs.is_done() && !found; sfs.next()) {
    CodeBlob* cb = sfs.current()->cb();
    if (cb != NULL && cb->oop_maps()) {
      // Find oopmap for current method
      const ImmutableOopMap* map = cb->oop_map_for_return_address(sfs.current()->pc());
      assert(map != NULL, "no oopmap found for pc");
      found = map->has_derived_pointer();
    }
  }
  if (found) {
    // $$$ Not sure what to do here.
    /*
    Scavenge::invoke(0);
    */
  }
#endif
}


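// Verifies every frame on the current thread's stack, provided no exception
// is pending and the top frame is a runtime or uncommon-trap stub (otherwise
// callee-saved registers may not be set up and verification is skipped).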
void InterfaceSupport::verify_stack() {
  JavaThread* thread = JavaThread::current();
  ResourceMark rm(thread);
  // disabled because it throws warnings that oop maps should only be accessed
  // in VM thread or during debugging

  if (!thread->has_pending_exception()) {
    // verification does not work if there are pending exceptions
    StackFrameStream sfs(thread);
    CodeBlob* cb = sfs.current()->cb();
    // In case of exceptions we might not have a runtime_stub on top of the
    // stack, so the callee-saved registers are not guaranteed to be set up
    // correctly and we cannot verify the stack.
    if (cb != NULL && !(cb->is_runtime_stub() || cb->is_uncommon_trap_stub())) return;

    for (; !sfs.is_done(); sfs.next()) {
      sfs.current()->verify(sfs.register_map());
    }
  }
}


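// Verifies only the topmost frame of the current thread.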
void InterfaceSupport::verify_last_frame() {
  JavaThread* thread = JavaThread::current();
  ResourceMark rm(thread);
  RegisterMap reg_map(thread);
  frame fr = thread->last_frame();
  fr.verify(&reg_map);
}


#endif // ASSERT


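// In debug builds, seed the C library random number generator from the GC
// stress intervals when ScavengeALot or FullGCALot is enabled.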
void InterfaceSupport_init() {
#ifdef ASSERT
  if (ScavengeALot || FullGCALot) {
    srand(ScavengeALotInterval * FullGCALotInterval);
  }
#endif
}