/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/threadLocalStorage.hpp"
#include "runtime/vframe.hpp"
#include "utilities/preserveException.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// Implementation of InterfaceSupport

#ifdef ASSERT

long InterfaceSupport::_number_of_calls       = 0;
long InterfaceSupport::_scavenge_alot_counter = 1;
long InterfaceSupport::_fullgc_alot_counter   = 1;
long InterfaceSupport::_fullgc_alot_invocation = 0;

Histogram* RuntimeHistogram;

RuntimeHistogramElement::RuntimeHistogramElement(const char* elementName) {
  static volatile jint RuntimeHistogram_lock = 0;
  _name = elementName;
  uintx count = 0;

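  // RuntimeHistogram is created lazily and may be raced on by several threads
  // registering their elements, so guard it with a minimal hand-rolled spin
  // lock: cmpxchg acquires, Atomic::dec releases, and a stall warning is
  // emitted every WarnOnStalledSpinLock iterations of the inner polling loop.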
  while (Atomic::cmpxchg(1, &RuntimeHistogram_lock, 0) != 0) {
    while (OrderAccess::load_acquire(&RuntimeHistogram_lock) != 0) {
      count += 1;
      if ((WarnOnStalledSpinLock > 0) &&
          (count % WarnOnStalledSpinLock == 0)) {
        warning("RuntimeHistogram_lock seems to be stalled");
      }
    }
  }

  if (RuntimeHistogram == NULL) {
    RuntimeHistogram = new Histogram("VM Runtime Call Counts", 200);
  }

  RuntimeHistogram->add_element(this);
  Atomic::dec(&RuntimeHistogram_lock);
}

void InterfaceSupport::trace(const char* result_type, const char* header) {
  tty->print_cr("%6ld  %s", _number_of_calls, header);
}

void InterfaceSupport::gc_alot() {
  Thread *thread = Thread::current();
  if (!thread->is_Java_thread()) return; // Avoid concurrent calls
  // Check for a new, not yet fully initialized thread. A thread in new mode cannot initiate a GC.
  JavaThread *current_thread = (JavaThread *)thread;
  if (current_thread->active_handles() == NULL) return;

  // Short-circuit any possible re-entrant gc-a-lot attempt
  if (thread->skip_gcalot()) return;

  if (Threads::is_vm_complete()) {

    if (++_fullgc_alot_invocation < FullGCALotStart) {
      return;
    }

    // Use this line if you want to block at a specific point,
    // e.g. one number_of_calls/scavenge/gc before you ran into the problem
    if (FullGCALot) _fullgc_alot_counter--;

    // Check if we should force a full gc
    if (_fullgc_alot_counter == 0) {
      // Release dummy so objects are forced to move
      if (!Universe::release_fullgc_alot_dummy()) {
        warning("FullGCALot: Unable to release more dummies at bottom of heap");
      }
      HandleMark hm(thread);
      Universe::heap()->collect(GCCause::_full_gc_alot);
      unsigned int invocations = Universe::heap()->total_full_collections();
      // Compute new interval
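      // os::random()/(max_jint+1.0) is a value in [0, 1), so the next
      // trigger counter below ends up in [1, FullGCALotInterval].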
      if (FullGCALotInterval > 1) {
        _fullgc_alot_counter = 1+(long)((double)FullGCALotInterval*os::random()/(max_jint+1.0));
        if (PrintGCDetails && Verbose) {
          tty->print_cr("Full gc no: %u\tInterval: %ld", invocations,
                        _fullgc_alot_counter);
        }
      } else {
        _fullgc_alot_counter = 1;
      }
      // Print progress message
      if (invocations % 100 == 0) {
        if (PrintGCDetails && Verbose) tty->print_cr("Full gc no: %u", invocations);
      }
    } else {
      if (ScavengeALot) _scavenge_alot_counter--;
      // Check if we should force a scavenge
      if (_scavenge_alot_counter == 0) {
        HandleMark hm(thread);
        Universe::heap()->collect(GCCause::_scavenge_alot);
        unsigned int invocations = Universe::heap()->total_collections() - Universe::heap()->total_full_collections();
        // Compute new interval
        if (ScavengeALotInterval > 1) {
          _scavenge_alot_counter = 1+(long)((double)ScavengeALotInterval*os::random()/(max_jint+1.0));
          if (PrintGCDetails && Verbose) {
            tty->print_cr("Scavenge no: %u\tInterval: %ld", invocations,
                          _scavenge_alot_counter);
          }
        } else {
          _scavenge_alot_counter = 1;
        }
        // Print progress message
        if (invocations % 1000 == 0) {
          if (PrintGCDetails && Verbose) tty->print_cr("Scavenge no: %u", invocations);
        }
      }
    }
  }
}


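// Debugging aid: walk_stack() snapshots up to the first 50 vframes of the
// current thread into vframe_array so they can be inspected from a native
// debugger; walk_stack_counter records how often it has been called.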
vframe* vframe_array[50];
int walk_stack_counter = 0;

void InterfaceSupport::walk_stack_from(vframe* start_vf) {
  // walk
  int i = 0;
  for (vframe* f = start_vf; f; f = f->sender()) {
    if (i < 50) vframe_array[i++] = f;
  }
}


void InterfaceSupport::walk_stack() {
  JavaThread* thread = JavaThread::current();
  walk_stack_counter++;
  if (!thread->has_last_Java_frame()) return;
  ResourceMark rm(thread);
  RegisterMap reg_map(thread);
  walk_stack_from(thread->last_java_vframe(&reg_map));
}



# ifdef ENABLE_ZAP_DEAD_LOCALS

static int zap_traversals = 0;

void InterfaceSupport::zap_dead_locals_old() {
  JavaThread* thread = JavaThread::current();
  if (zap_traversals == -1) // edit constant for debugging
    warning("I am here");
  int zap_frame_count = 0; // count frames to help debugging
  for (StackFrameStream sfs(thread); !sfs.is_done(); sfs.next()) {
    sfs.current()->zap_dead_locals(thread, sfs.register_map());
    ++zap_frame_count;
  }
  ++zap_traversals;
}

# endif

// invocation counters for InterfaceSupport::deoptimizeAll/zombieAll functions
int deoptimizeAllCounter = 0;
int zombieAllCounter = 0;
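// Note: these counters are plain ints bumped by many threads without
// synchronization, so the pacing below is only approximate; that is
// acceptable for a debug-only stress feature.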

void InterfaceSupport::zombieAll() {
  // This method is called by all threads when a thread makes a
  // transition to the VM state (for example, on runtime calls).
  // Divide the number of calls by the number of threads to avoid making
  // the frequency of ZombieAll events dependent on the number of threads.
  int value = zombieAllCounter / Threads::number_of_threads();
  if (is_init_completed() && value > ZombieALotInterval) {
    zombieAllCounter = 0;
    VM_ZombieAll op;
    VMThread::execute(&op);
  }
  zombieAllCounter++;
}

void InterfaceSupport::unlinkSymbols() {
  VM_UnlinkSymbols op;
  VMThread::execute(&op);
}

void InterfaceSupport::deoptimizeAll() {
  // This method is called by all threads when a thread makes a
  // transition to the VM state (for example, on runtime calls).
  // Divide the number of calls by the number of threads to avoid making
  // the frequency of DeoptimizeAll events dependent on the number of threads.
  int value = deoptimizeAllCounter / Threads::number_of_threads();
  if (is_init_completed()) {
    if (DeoptimizeALot && value > DeoptimizeALotInterval) {
      deoptimizeAllCounter = 0;
      VM_DeoptimizeAll op;
      VMThread::execute(&op);
    } else if (DeoptimizeRandom && (value & 0x1F) == (os::random() & 0x1F)) {
      VM_DeoptimizeAll op;
      VMThread::execute(&op);
    }
  }
  deoptimizeAllCounter++;
}


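// Walk the current thread's frames looking for a compiled frame whose oopmap
// records derived pointers (COMPILER2 only). The actual stress action, forcing
// a collection at that point, has been left commented out below.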
void InterfaceSupport::stress_derived_pointers() {
#ifdef COMPILER2
  JavaThread *thread = JavaThread::current();
  if (!is_init_completed()) return;
  ResourceMark rm(thread);
  bool found = false;
  for (StackFrameStream sfs(thread); !sfs.is_done() && !found; sfs.next()) {
    CodeBlob* cb = sfs.current()->cb();
    if (cb != NULL && cb->oop_maps()) {
      // Find oopmap for current method
      const ImmutableOopMap* map = cb->oop_map_for_return_address(sfs.current()->pc());
      assert(map != NULL, "no oopmap found for pc");
      found = map->has_derived_pointer();
    }
  }
  if (found) {
    // $$$ Not sure what to do here.
    /*
    Scavenge::invoke(0);
    */
  }
#endif
}


void InterfaceSupport::verify_stack() {
  JavaThread* thread = JavaThread::current();
  ResourceMark rm(thread);
  // disabled because it throws warnings that oop maps should only be accessed
  // in VM thread or during debugging

  if (!thread->has_pending_exception()) {
    // verification does not work if there are pending exceptions
    StackFrameStream sfs(thread);
    CodeBlob* cb = sfs.current()->cb();
    // In case of exceptions we might not have a runtime_stub on top of the
    // stack, in which case the callee-saved registers are not set up
    // correctly and we cannot verify the stack.
    if (cb != NULL && !(cb->is_runtime_stub() || cb->is_uncommon_trap_stub())) return;

    for (; !sfs.is_done(); sfs.next()) {
      sfs.current()->verify(sfs.register_map());
    }
  }
}


void InterfaceSupport::verify_last_frame() {
  JavaThread* thread = JavaThread::current();
  ResourceMark rm(thread);
  RegisterMap reg_map(thread);
  frame fr = thread->last_frame();
  fr.verify(&reg_map);
}


#endif // ASSERT


void InterfaceSupport_init() {
#ifdef ASSERT
  if (ScavengeALot || FullGCALot) {
    srand(ScavengeALotInterval * FullGCALotInterval);
  }
#endif
}