 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_CODECACHE_HPP
#define SHARE_VM_CODE_CODECACHE_HPP

#include "code/codeBlob.hpp"
#include "memory/allocation.hpp"
#include "memory/heap.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oopsHierarchy.hpp"

// The CodeCache implements the code cache for various pieces of generated
// code, e.g., compiled java methods, runtime stubs, transition frames, etc.
// The entries in the CodeCache are all CodeBlob's.

// Implementation:
//   - Each CodeBlob occupies one chunk of memory.
//   - Like the offset table in oldspace the zone has a table for
//     locating a method given an address of an instruction.

class OopClosure;
class DepChange;

// All-static facade over the single CodeHeap that backs the code cache.
// Tracks blob/nmethod counts and the list of nmethods with scavengable oops.
class CodeCache : AllStatic {
  friend class VMStructs;
 private:
  // CodeHeap is malloc()'ed at startup and never deleted during shutdown,
  // so that the generated assembly code is always there when it's needed.
  // This may cause memory leak, but is necessary, for now. See 4423824,
  // 4422213 or 4436291 for details.
  static CodeHeap* _heap;                           // the one CodeHeap holding all CodeBlobs
  static int _number_of_blobs;                      // total number of CodeBlobs in the cache
  static int _number_of_adapters;                   // total number of adapters in the cache
  static int _number_of_nmethods;                   // total number of nmethods in the cache
  static int _number_of_nmethods_with_dependencies;
  static bool _needs_cache_clean;                   // set when inline caches should be flushed
  static nmethod* _scavenge_root_nmethods;          // linked via nm->scavenge_root_link()

  static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
  static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;

  static int _codemem_full_count;                   // number of times the code cache ran full
  // Thin forwards to the underlying CodeHeap's bookkeeping.
  static size_t bytes_allocated_in_freelist() { return _heap->allocated_in_freelist(); }
  static int    allocated_segments()          { return _heap->allocated_segments(); }
  static size_t freelist_length()             { return _heap->freelist_length(); }

 public:

  // Initialization
  static void initialize();

  static void report_codemem_full();

  // Allocation/administration
  static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob
  static void commit(CodeBlob* cb);                  // called when the allocated CodeBlob has been filled
  static int alignment_unit();                       // guaranteed alignment of all CodeBlobs
  static int alignment_offset();                     // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
  static void free(CodeBlob* cb);                    // frees a CodeBlob
  static bool contains(void *p);                     // returns whether p is included
  static void blobs_do(void f(CodeBlob* cb));        // iterates over all CodeBlobs
  static void blobs_do(CodeBlobClosure* f);          // iterates over all CodeBlobs
  static void nmethods_do(void f(nmethod* nm));      // iterates over all nmethods
  static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods

  // Lookup
  static CodeBlob* find_blob(void* start);
  static nmethod*  find_nmethod(void* start);

  // Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
  // what you are doing)
  static CodeBlob* find_blob_unsafe(void* start) {
    // NMT can walk the stack before code cache is created
    if (_heap == NULL) return NULL;

    CodeBlob* result = (CodeBlob*)_heap->find_start(start);
    // this assert is too strong because the heap code will return the
    // heapblock containing start. That block can often be larger than
    // the codeBlob itself. If you look up an address that is within
    // the heapblock but not in the codeBlob you will assert.
    //
    // Most things will not lookup such bad addresses. However
    // AsyncGetCallTrace can see intermediate frames and get that kind
    // of invalid address and so can a developer using hsfind.
    //
    // The more correct answer is to return NULL if blob_contains() returns
    // false.
    // assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob");

    if (result != NULL && !result->blob_contains((address)start)) {
      result = NULL;
    }
    return result;
  }

  // Iteration
  static CodeBlob* first();
  static CodeBlob* next(CodeBlob* cb);
  static CodeBlob* alive(CodeBlob *cb);              // skips forward to the next alive CodeBlob starting at cb
  static nmethod*  alive_nmethod(CodeBlob *cb);
  static nmethod*  first_nmethod();
  static nmethod*  next_nmethod(CodeBlob* cb);
  static int nof_blobs()    { return _number_of_blobs; }
  static int nof_adapters() { return _number_of_adapters; }
  static int nof_nmethods() { return _number_of_nmethods; }

  // GC support
  static void gc_epilogue();
  static void gc_prologue();
  static void verify_oops();
  // If "unloading_occurred" is true, then unloads (i.e., breaks root links
  // to) any unmarked codeBlobs in the cache. Sets "marked_for_unloading"
  // to "true" iff some code got unloaded.
  // NOTE(review): no "marked_for_unloading" parameter appears in this
  // declaration -- the comment above looks stale; verify against the .cpp.
  static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
  static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
  static void scavenge_root_nmethods_do(CodeBlobClosure* f);

  static nmethod* scavenge_root_nmethods()            { return _scavenge_root_nmethods; }
  static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
  static void add_scavenge_root_nmethod(nmethod* nm);
  static void drop_scavenge_root_nmethod(nmethod* nm);
  static void prune_scavenge_root_nmethods();

  // Printing/debugging
  static void print();                           // prints summary
  static void print_internals();
  static void print_memory_overhead();
  static void verify();                          // verifies the code cache
  static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
  static void print_summary(outputStream* st, bool detailed = true); // Prints a summary of the code cache usage
  static void log_state(outputStream* st);

  // The full limits of the codeCache
  static address low_bound()  { return (address) _heap->low_boundary(); }
  static address high_bound() { return (address) _heap->high_boundary(); }
  static address high()       { return (address) _heap->high(); }

  // Profiling
  static address first_address();                // first address used for CodeBlobs
  static address last_address();                 // last address used for CodeBlobs
  static size_t capacity()              { return _heap->capacity(); }
  static size_t max_capacity()          { return _heap->max_capacity(); }
  static size_t unallocated_capacity()  { return _heap->unallocated_capacity(); }
  static double reverse_free_ratio();

  static bool needs_cache_clean()           { return _needs_cache_clean; }
  static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
  static void clear_inline_caches();        // clear all inline caches

  static void verify_clean_inline_caches();
  static void verify_icholder_relocations();

  // Deoptimization
  static int mark_for_deoptimization(DepChange& changes);
#ifdef HOTSWAP
  static int mark_for_evol_deoptimization(instanceKlassHandle dependee);
#endif // HOTSWAP

  static void mark_all_nmethods_for_deoptimization();
  static int  mark_for_deoptimization(Method* dependee);
  static void make_marked_nmethods_zombies();
  static void make_marked_nmethods_not_entrant();

  // tells how many nmethods have dependencies
  static int number_of_nmethods_with_dependencies();

  static int get_codemem_full_count() { return _codemem_full_count; }
};

#endif // SHARE_VM_CODE_CODECACHE_HPP
|
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_CODECACHE_HPP
#define SHARE_VM_CODE_CODECACHE_HPP

#include "code/codeBlob.hpp"
#include "code/nmethod.hpp"
#include "memory/allocation.hpp"
#include "memory/heap.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/mutexLocker.hpp"

// The CodeCache implements the code cache for various pieces of generated
// code, e.g., compiled java methods, runtime stubs, transition frames, etc.
// The entries in the CodeCache are all CodeBlob's.

// -- Implementation --
// The CodeCache consists of one or more CodeHeaps, each of which contains
// CodeBlobs of a specific CodeBlobType. Currently heaps for the following
// types are available:
//  - Non-methods: Non-methods like Buffers, Adapters and Runtime Stubs
//  - Profiled nmethods: nmethods that are profiled, i.e., those
//    executed at level 2 or 3
//  - Non-Profiled nmethods: nmethods that are not profiled, i.e., those
//    executed at level 1 or 4 and native methods
//  - All: Used for code of all types if code cache segmentation is disabled.
//
// In the rare case of the non-method code heap getting full, non-method code
// will be stored in the non-profiled code heap as a fallback solution.
//
// Depending on the availability of compilers and TieredCompilation there
// may be fewer heaps. The size of the code heaps depends on the values of
// ReservedCodeCacheSize, NonProfiledCodeHeapSize and ProfiledCodeHeapSize
// (see CodeCache::heap_available(..) and CodeCache::initialize_heaps(..)
// for details).
//
// Code cache segmentation is controlled by the flag SegmentedCodeCache.
// If turned off, all code types are stored in a single code heap. By default
// code cache segmentation is turned on if TieredCompilation is enabled and
// ReservedCodeCacheSize >= 240 MB.
//
// All methods of the CodeCache accepting a CodeBlobType only apply to
// CodeBlobs of the given type. For example, iteration over the
// CodeBlobs of a specific type can be done by using CodeCache::first_blob(..)
// and CodeCache::next_blob(..) and providing the corresponding CodeBlobType.
//
// IMPORTANT: If you add new CodeHeaps to the code cache or change the
// existing ones, make sure to adapt the dtrace scripts (jhelper.d) for
// Solaris and BSD.

class OopClosure;
class DepChange;

// All-static facade over the set of CodeHeaps making up the (possibly
// segmented) code cache. Tracks blob/nmethod counts, the overall address
// bounds, and the list of nmethods with scavengable oops.
class CodeCache : AllStatic {
  friend class VMStructs;
  friend class NMethodIterator;
 private:
  // CodeHeaps of the cache
  static GrowableArray<CodeHeap*>* _heaps;

  static address _low_bound;                            // Lower bound of CodeHeap addresses
  static address _high_bound;                           // Upper bound of CodeHeap addresses
  static int _number_of_blobs;                          // Total number of CodeBlobs in the cache
  static int _number_of_adapters;                       // Total number of Adapters in the cache
  static int _number_of_nmethods;                       // Total number of nmethods in the cache
  static int _number_of_nmethods_with_dependencies;     // Total number of nmethods with dependencies
  static bool _needs_cache_clean;                       // True if inline caches of the nmethods need to be flushed
  static nmethod* _scavenge_root_nmethods;              // linked via nm->scavenge_root_link()
  static int _codemem_full_count;                       // Number of times a CodeHeap in the cache was full

  static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
  static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;

  // CodeHeap management
  static void initialize_heaps();                             // Initializes the CodeHeaps
  // Creates a new heap with the given name and size, containing CodeBlobs of the given type
  static void add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type);
  static CodeHeap* get_code_heap(CodeBlob* cb);               // Returns the CodeHeap for the given CodeBlob
  static CodeHeap* get_code_heap(int code_blob_type);         // Returns the CodeHeap for the given CodeBlobType
  static bool heap_available(int code_blob_type);             // Returns true if a CodeHeap for the given CodeBlobType is available
  static ReservedCodeSpace reserve_heap_memory(size_t size);  // Reserves one continuous chunk of memory for the CodeHeaps

  // Iteration
  static CodeBlob* first_blob(CodeHeap* heap);                // Returns the first CodeBlob on the given CodeHeap
  static CodeBlob* first_blob(int code_blob_type);            // Returns the first CodeBlob of the given type
  static CodeBlob* next_blob(CodeHeap* heap, CodeBlob* cb);   // Returns the next CodeBlob on the given CodeHeap
  static CodeBlob* next_blob(CodeBlob* cb);                   // Returns the next CodeBlob of the given type succeeding the given CodeBlob

  // Aggregated bookkeeping over all CodeHeaps.
  static size_t bytes_allocated_in_freelists();
  static int    allocated_segments();
  static size_t freelists_length();

 public:
  // Initialization
  static void initialize();

  // Allocation/administration
  static CodeBlob* allocate(int size, int code_blob_type, bool is_critical = false); // allocates a new CodeBlob
  static void commit(CodeBlob* cb);                  // called when the allocated CodeBlob has been filled
  static int alignment_unit();                       // guaranteed alignment of all CodeBlobs
  static int alignment_offset();                     // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
  static void free(CodeBlob* cb);                    // frees a CodeBlob
  static bool contains(void *p);                     // returns whether p is included
  static void blobs_do(void f(CodeBlob* cb));        // iterates over all CodeBlobs
  static void blobs_do(CodeBlobClosure* f);          // iterates over all CodeBlobs
  static void nmethods_do(void f(nmethod* nm));      // iterates over all nmethods
  static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods

  // Lookup
  static CodeBlob* find_blob(void* start);           // Returns the CodeBlob containing the given address
  static CodeBlob* find_blob_unsafe(void* start);    // Same as find_blob but does not fail if looking up a zombie method
  static nmethod*  find_nmethod(void* start);        // Returns the nmethod containing the given address

  static int nof_blobs()    { return _number_of_blobs; }    // Returns the total number of CodeBlobs in the cache
  static int nof_adapters() { return _number_of_adapters; } // Returns the total number of Adapters in the cache
  static int nof_nmethods() { return _number_of_nmethods; } // Returns the total number of nmethods in the cache

  // GC support
  static void gc_epilogue();
  static void gc_prologue();
  static void verify_oops();
  // If "unloading_occurred" is true, then unloads (i.e., breaks root links
  // to) any unmarked codeBlobs in the cache. Sets "marked_for_unloading"
  // to "true" iff some code got unloaded.
  // NOTE(review): no "marked_for_unloading" parameter appears in this
  // declaration -- the comment above looks stale; verify against the .cpp.
  static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
  static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
  static void scavenge_root_nmethods_do(CodeBlobClosure* f);

  static nmethod* scavenge_root_nmethods()            { return _scavenge_root_nmethods; }
  static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
  static void add_scavenge_root_nmethod(nmethod* nm);
  static void drop_scavenge_root_nmethod(nmethod* nm);
  static void prune_scavenge_root_nmethods();

  // Printing/debugging
  static void print();                           // prints summary
  static void print_internals();
  static void print_memory_overhead();
  static void verify();                          // verifies the code cache
  static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
  static void print_summary(outputStream* st, bool detailed = true); // Prints a summary of the code cache usage
  static void log_state(outputStream* st);
  // Returns the heap's name, or "Unused" if no heap exists for that type.
  static const char* get_code_heap_name(int code_blob_type) { return (heap_available(code_blob_type) ? get_code_heap(code_blob_type)->name() : "Unused"); }
  static void report_codemem_full(int code_blob_type, bool print);

  // The full limits of the codeCache
  static address low_bound()  { return _low_bound; }
  static address high_bound() { return _high_bound; }

  // Profiling
  // Per-type queries return 0 when no heap exists for the given type.
  static size_t capacity(int code_blob_type) { return heap_available(code_blob_type) ? get_code_heap(code_blob_type)->capacity() : 0; }
  static size_t capacity();
  static size_t unallocated_capacity(int code_blob_type) { return heap_available(code_blob_type) ? get_code_heap(code_blob_type)->unallocated_capacity() : 0; }
  static size_t unallocated_capacity();
  static size_t max_capacity(int code_blob_type) { return heap_available(code_blob_type) ? get_code_heap(code_blob_type)->max_capacity() : 0; }
  static size_t max_capacity();

  static bool is_full(int* code_blob_type);
  static double reverse_free_ratio(int code_blob_type);

  static bool needs_cache_clean()           { return _needs_cache_clean; }
  static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
  static void clear_inline_caches();        // clear all inline caches

  // Returns the CodeBlobType for nmethods of the given compilation level
  static int get_code_blob_type(int comp_level) {
    if (comp_level == CompLevel_none ||
        comp_level == CompLevel_simple ||
        comp_level == CompLevel_full_optimization) {
      // Non profiled methods
      return CodeBlobType::MethodNonProfiled;
    } else if (comp_level == CompLevel_limited_profile ||
               comp_level == CompLevel_full_profile) {
      // Profiled methods
      return CodeBlobType::MethodProfiled;
    }
    ShouldNotReachHere();
    return 0;
  }

  static void verify_clean_inline_caches();
  static void verify_icholder_relocations();

  // Deoptimization
  static int mark_for_deoptimization(DepChange& changes);
#ifdef HOTSWAP
  static int mark_for_evol_deoptimization(instanceKlassHandle dependee);
#endif // HOTSWAP

  static void mark_all_nmethods_for_deoptimization();
  static int  mark_for_deoptimization(Method* dependee);
  static void make_marked_nmethods_zombies();
  static void make_marked_nmethods_not_entrant();

  // tells how many nmethods have dependencies
  static int number_of_nmethods_with_dependencies();

  static int get_codemem_full_count() { return _codemem_full_count; }
};


// Iterator to iterate over nmethods in the CodeCache.
// Typical use: NMethodIterator iter; while (iter.next_alive()) { ... }
// Callers must hold CodeCache_lock or be at a safepoint (asserted in next()).
class NMethodIterator : public StackObj {
 private:
  CodeBlob* _code_blob;   // Current CodeBlob
  int _code_blob_type;    // Refers to current CodeHeap

 public:
  NMethodIterator() {
    initialize(NULL); // Set to NULL, initialized by first call to next()
  }

  NMethodIterator(nmethod* nm) {
    initialize(nm);
  }

  // Advance iterator to next nmethod
  bool next() {
    assert_locked_or_safepoint(CodeCache_lock);
    assert(_code_blob_type < CodeBlobType::NumTypes, "end reached");

    bool result = next_nmethod();
    while (!result && (_code_blob_type < CodeBlobType::MethodProfiled)) {
      // Advance to next code heap if segmented code cache
      _code_blob_type++;
      result = next_nmethod();
    }
    return result;
  }

  // Advance iterator to next alive nmethod
  bool next_alive() {
    bool result = next();
    while(result && !_code_blob->is_alive()) {
      result = next();
    }
    return result;
  }

  bool end() const        { return _code_blob == NULL; }
  nmethod* method() const { return (nmethod*)_code_blob; }

 private:
  // Initialize iterator to given nmethod
  void initialize(nmethod* nm) {
    _code_blob = (CodeBlob*)nm;
    if (!SegmentedCodeCache) {
      // Iterate over all CodeBlobs
      _code_blob_type = CodeBlobType::All;
    } else if (nm != NULL) {
      // Resume iteration in the heap that holds nm's compilation level.
      _code_blob_type = CodeCache::get_code_blob_type(nm->comp_level());
    } else {
      // Only iterate over method code heaps, starting with non-profiled
      _code_blob_type = CodeBlobType::MethodNonProfiled;
    }
  }

  // Advance iterator to the next nmethod in the current code heap
  bool next_nmethod() {
    // Get first method CodeBlob
    if (_code_blob == NULL) {
      _code_blob = CodeCache::first_blob(_code_blob_type);
      if (_code_blob == NULL) {
        return false;
      } else if (_code_blob->is_nmethod()) {
        return true;
      }
    }
    // Search for next method CodeBlob
    _code_blob = CodeCache::next_blob(_code_blob);
    while (_code_blob != NULL && !_code_blob->is_nmethod()) {
      _code_blob = CodeCache::next_blob(_code_blob);
    }
    return _code_blob != NULL;
  }
};

#endif // SHARE_VM_CODE_CODECACHE_HPP