/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_CODECACHE_HPP
#define SHARE_VM_CODE_CODECACHE_HPP

#include "code/codeBlob.hpp"
#include "memory/allocation.hpp"
#include "memory/heap.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/safepoint.hpp"

// The CodeCache implements the code cache for various pieces of generated
// code, e.g., compiled Java methods, runtime stubs, transition frames, etc.
// The entries in the CodeCache are all CodeBlobs.

// Implementation:
//   - Each CodeBlob occupies one chunk of memory.
//   - Like the offset table in oldspace, the zone has a table for
//     locating a method given the address of an instruction.

class OopClosure;
class DepChange;

class ParallelCodeCacheIterator VALUE_OBJ_CLASS_SPEC {
  friend class CodeCache;
private:
  volatile int  _claimed_idx;
  volatile bool _finished;
public:
  ParallelCodeCacheIterator();
  void parallel_blobs_do(CodeBlobClosure* f);
};
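
// A single shared instance hands out blobs to multiple worker threads: the
// volatile _claimed_idx serves as the claim cursor and _finished flags
// completion, so each blob is visited by exactly one worker. (This is a
// reading of the field names; the claiming logic itself lives in the .cpp
// file.)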

class CodeCache : AllStatic {
  friend class VMStructs;
 private:
  // CodeHeap is malloc()'ed at startup and never deleted during shutdown,
  // so that the generated assembly code is always there when it's needed.
  // This may cause a memory leak, but it is necessary for now. See 4423824,
  // 4422213 or 4436291 for details.
  static CodeHeap * _heap;
  static int _number_of_blobs;
  static int _number_of_adapters;
  static int _number_of_nmethods;
  static int _number_of_nmethods_with_dependencies;
  static bool _needs_cache_clean;
  static nmethod* _scavenge_root_nmethods;  // linked via nm->scavenge_root_link()

  static void verify_if_often() PRODUCT_RETURN;

  static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
  static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;

  static int _codemem_full_count;

 public:

  // Initialization
  static void initialize();

  static void report_codemem_full();

  // Allocation/administration
  static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob
  static void commit(CodeBlob* cb);                 // called when the allocated CodeBlob has been filled
  static int alignment_unit();                      // guaranteed alignment of all CodeBlobs
  static int alignment_offset();                    // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
  static void free(CodeBlob* cb);                   // frees a CodeBlob
  static void flush();                              // flushes all CodeBlobs
  static bool contains(void *p);                    // returns whether p is included
  static void blobs_do(void f(CodeBlob* cb));       // iterates over all CodeBlobs
  static void blobs_do(CodeBlobClosure* f);         // iterates over all CodeBlobs
  static void nmethods_do(void f(nmethod* nm));     // iterates over all nmethods
  static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods
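
  // A minimal usage sketch for the function-pointer iterators (hypothetical
  // caller code, not part of this file):
  //
  //   static void count_nmethod(nmethod* nm) { ... }   // count_nmethod is hypothetical
  //   ...
  //   CodeCache::nmethods_do(count_nmethod);           // applied to every nmethod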

  // Lookup
  static CodeBlob* find_blob(void* start);
  static nmethod*  find_nmethod(void* start);

  // Lookup that does not fail if you look up a zombie method (if you call this, be sure you know
  // what you are doing)
  static CodeBlob* find_blob_unsafe(void* start) {
    // NMT can walk the stack before the code cache is created
    if (_heap == NULL) return NULL;

    CodeBlob* result = (CodeBlob*)_heap->find_start(start);
    // This assert is too strong because the heap code will return the
    // heapblock containing start. That block can often be larger than
    // the CodeBlob itself. If you look up an address that is within
    // the heapblock but not in the CodeBlob, you will assert.
    //
    // Most things will not look up such bad addresses. However,
    // AsyncGetCallTrace can see intermediate frames and get that kind
    // of invalid address, and so can a developer using hsfind.
    //
    // The more correct answer is to return NULL if blob_contains() returns
    // false.
    // assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob");

    if (result != NULL && !result->blob_contains((address)start)) {
      result = NULL;
    }
    return result;
  }
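
  // A minimal usage sketch (hypothetical caller, not part of this file;
  // 'pc' may be an arbitrary address, e.g. from a profiler):
  //
  //   CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
  //   if (cb != NULL && cb->is_nmethod()) {
  //     // safe to treat cb as an nmethod
  //   }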

  // Iteration
  static CodeBlob* first();
  static CodeBlob* next (CodeBlob* cb);
  static CodeBlob* alive(CodeBlob *cb);
  static nmethod* alive_nmethod(CodeBlob *cb);
  static nmethod* first_nmethod();
  static nmethod* next_nmethod (CodeBlob* cb);
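
  // The iteration idiom over these functions (a sketch; the loop variable
  // names are hypothetical):
  //
  //   for (CodeBlob* cb = CodeCache::first(); cb != NULL; cb = CodeCache::next(cb)) { ... }
  //   for (nmethod* nm = CodeCache::first_nmethod(); nm != NULL; nm = CodeCache::next_nmethod(nm)) { ... }
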
  static int       nof_blobs()                 { return _number_of_blobs; }
  static int       nof_adapters()              { return _number_of_adapters; }
  static int       nof_nmethods()              { return _number_of_nmethods; }

  // GC support
  static void gc_epilogue();
  static void gc_prologue();
  static void verify_oops();

  // Parallel GC support
  static ParallelCodeCacheIterator parallel_iterator() {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
    return ParallelCodeCacheIterator();
  }
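
  // A minimal usage sketch (hypothetical GC worker code, not part of this
  // file): the iterator is obtained once at a safepoint and shared, and each
  // worker passes its closure to parallel_blobs_do.
  //
  //   ParallelCodeCacheIterator iter = CodeCache::parallel_iterator();
  //   // In each worker thread, with 'iter' shared between the workers:
  //   iter.parallel_blobs_do(&my_closure);   // my_closure: a CodeBlobClosure (hypothetical)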

  // If "unloading_occurred" is true, then unloads (i.e., breaks root links
  // to) any unmarked CodeBlobs in the cache.
  static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
  static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
  static void scavenge_root_nmethods_do(CodeBlobClosure* f);

  static nmethod* scavenge_root_nmethods()          { return _scavenge_root_nmethods; }
  static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
  static void add_scavenge_root_nmethod(nmethod* nm);
  static void drop_scavenge_root_nmethod(nmethod* nm);
  static void prune_scavenge_root_nmethods();
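
  // The scavenge-root nmethods form a singly linked list threaded through
  // nm->scavenge_root_link(); a traversal sketch (hypothetical caller):
  //
  //   for (nmethod* nm = CodeCache::scavenge_root_nmethods();
  //        nm != NULL; nm = nm->scavenge_root_link()) { ... }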

  // Printing/debugging
  static void print();                           // prints summary
  static void print_internals();
  static void verify();                          // verifies the code cache
  static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
  static void print_summary(outputStream* st, bool detailed = true); // Prints a summary of the code cache usage
  static void log_state(outputStream* st);

  // The full limits of the code cache
  static address  low_bound()                    { return (address) _heap->low_boundary(); }
  static address  high_bound()                   { return (address) _heap->high_boundary(); }
  static address  high()                         { return (address) _heap->high(); }
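
  // A quick range check against the reserved code cache (a sketch; 'pc' is
  // hypothetical, and CodeCache::contains() is the precise test):
  //
  //   bool maybe_in_cache = CodeCache::low_bound() <= pc && pc < CodeCache::high_bound();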

  // Profiling
  static address first_address();                // first address used for CodeBlobs
  static address last_address();                 // last  address used for CodeBlobs
  static size_t  capacity()                      { return _heap->capacity(); }
  static size_t  max_capacity()                  { return _heap->max_capacity(); }
  static size_t  unallocated_capacity()          { return _heap->unallocated_capacity(); }
  static double  reverse_free_ratio();

  static bool needs_cache_clean()                { return _needs_cache_clean; }
  static void set_needs_cache_clean(bool v)      { _needs_cache_clean = v;    }
  static void clear_inline_caches();             // clear all inline caches

  static void verify_clean_inline_caches();
  static void verify_icholder_relocations();

  // Deoptimization
  static int  mark_for_deoptimization(DepChange& changes);
#ifdef HOTSWAP
  static int  mark_for_evol_deoptimization(instanceKlassHandle dependee);
#endif // HOTSWAP

  static void mark_all_nmethods_for_deoptimization();
  static int  mark_for_deoptimization(Method* dependee);
  static void make_marked_nmethods_not_entrant();

  // Tells how many nmethods have dependencies
  static int number_of_nmethods_with_dependencies();

  static int get_codemem_full_count() { return _codemem_full_count; }
};

#endif // SHARE_VM_CODE_CODECACHE_HPP