205
206 // Compute initial sizes of CodeHeaps
207 size_t init_non_method_size = MIN2(InitialCodeCacheSize, non_method_size);
208 size_t init_profiled_size = MIN2(InitialCodeCacheSize, profiled_size);
209 size_t init_non_profiled_size = MIN2(InitialCodeCacheSize, non_profiled_size);
210
211 // Reserve one continuous chunk of memory for CodeHeaps and split it into
212 // parts for the individual heaps. The memory layout looks like this:
213 // ---------- high -----------
214 // Non-profiled nmethods
215 // Profiled nmethods
216 // Non-methods
217 // ---------- low ------------
218 ReservedCodeSpace rs = reserve_heap_memory(non_profiled_size + profiled_size + non_method_size);
219 ReservedSpace non_method_space = rs.first_part(non_method_size);
220 ReservedSpace rest = rs.last_part(non_method_size);
221 ReservedSpace profiled_space = rest.first_part(profiled_size);
222 ReservedSpace non_profiled_space = rest.last_part(profiled_size);
223
224 // Non-methods (stubs, adapters, ...)
225 add_heap(non_method_space, "Code Heap 'non-methods'", init_non_method_size, CodeBlobType::NonMethod);
226 // Tier 2 and tier 3 (profiled) methods
227 add_heap(profiled_space, "Code Heap 'profiled nmethods'", init_profiled_size, CodeBlobType::MethodProfiled);
228 // Tier 1 and tier 4 (non-profiled) methods and native methods
229 add_heap(non_profiled_space, "Code Heap 'non-profiled nmethods'", init_non_profiled_size, CodeBlobType::MethodNonProfiled);
230 }
231
232 ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
233 // Determine alignment
234 const size_t page_size = os::can_execute_large_page_memory() ?
235 MIN2(os::page_size_for_region(InitialCodeCacheSize, 8),
236 os::page_size_for_region(size, 8)) :
237 os::vm_page_size();
238 const size_t granularity = os::vm_allocation_granularity();
239 const size_t r_align = MAX2(page_size, granularity);
240 const size_t r_size = align_size_up(size, r_align);
241 const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
242 MAX2(page_size, granularity);
243
244 ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
245
246 // Initialize bounds
247 _low_bound = (address)rs.base();
248 _high_bound = _low_bound + rs.size();
249
349 // Get CodeHeap for the given CodeBlobType
350 CodeHeap* heap = get_code_heap(SegmentedCodeCache ? code_blob_type : CodeBlobType::All);
351 assert (heap != NULL, "heap is null");
352
353 while (true) {
354 cb = (CodeBlob*)heap->allocate(size, is_critical);
355 if (cb != NULL) break;
356 if (!heap->expand_by(CodeCacheExpansionSize)) {
357 // Expansion failed
358 if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonMethod)) {
359 // Fallback solution: Store non-method code in the non-profiled code heap
360 return allocate(size, CodeBlobType::MethodNonProfiled, is_critical);
361 }
362 return NULL;
363 }
364 if (PrintCodeCacheExtension) {
365 ResourceMark rm;
366 if (SegmentedCodeCache) {
367 tty->print("%s", heap->name());
368 } else {
369 tty->print("Code Cache");
370 }
371 tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
372 (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
373 (address)heap->high() - (address)heap->low_boundary());
374 }
375 }
376 print_trace("allocation", cb, size);
377 _number_of_blobs++;
378 return cb;
379 }
380
381 void CodeCache::free(CodeBlob* cb) {
382 assert_locked_or_safepoint(CodeCache_lock);
383
384 print_trace("free", cb);
385 if (cb->is_nmethod()) {
386 _number_of_nmethods--;
387 if (((nmethod *)cb)->has_dependencies()) {
388 _number_of_nmethods_with_dependencies--;
389 }
803
804 void icache_init();
805
806 void CodeCache::initialize() {
807 assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
808 #ifdef COMPILER2
809 assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
810 #endif
811 assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
812 // This was originally just a check of the alignment, causing failure, instead, round
813 // the code cache to the page size. In particular, Solaris is moving to a larger
814 // default page size.
815 CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
816
817 if (SegmentedCodeCache) {
818 // Use multiple code heaps
819 initialize_heaps();
820 } else {
821 // Use a single code heap
822 ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
823 add_heap(rs, "Code Cache", InitialCodeCacheSize, CodeBlobType::All);
824 }
825
826 // Initialize ICache flush mechanism
827 // This service is needed for os::register_code_area
828 icache_init();
829
830 // Give OS a chance to register generated code area.
831 // This is used on Windows 64 bit platforms to register
832 // Structured Exception Handlers for our generated code.
833 os::register_code_area((char*)low_bound(), (char*)high_bound());
834 }
835
// VM startup hook: forwards to CodeCache::initialize() to set up the
// code cache before any generated code is produced.
void codeCache_init() {
  CodeCache::initialize();
}
839
840 //------------------------------------------------------------------------------------------------
841
// Returns the count of nmethods that carry dependencies; the counter is
// decremented in CodeCache::free() when such an nmethod is released.
int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
1224 }
1225 }
1226 }
1227 tty->print_cr("OopMaps");
1228 tty->print_cr(" #blobs = %d", number_of_blobs);
1229 tty->print_cr(" code size = %d", code_size);
1230 tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
1231 tty->print_cr(" map size = %d", map_size);
1232 }
1233
1234 #endif // !PRODUCT
1235 }
1236
1237 void CodeCache::print_summary(outputStream* st, bool detailed) {
1238 FOR_ALL_HEAPS(heap_iterator) {
1239 CodeHeap* heap = (*heap_iterator);
1240 size_t total = (heap->high_boundary() - heap->low_boundary());
1241 if (SegmentedCodeCache) {
1242 st->print("%s:", heap->name());
1243 } else {
1244 st->print("Code Cache:");
1245 }
1246 st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
1247 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
1248 total/K, (total - heap->unallocated_capacity())/K,
1249 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);
1250
1251 if (detailed) {
1252 st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
1253 p2i(heap->low_boundary()),
1254 p2i(heap->high()),
1255 p2i(heap->high_boundary()));
1256 }
1257 }
1258
1259 if (detailed) {
1260 st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
1261 " adapters=" UINT32_FORMAT,
1262 nof_blobs(), nof_nmethods(), nof_adapters());
1263 st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
1264 "enabled" : Arguments::mode() == Arguments::_int ?
|
205
206 // Compute initial sizes of CodeHeaps
207 size_t init_non_method_size = MIN2(InitialCodeCacheSize, non_method_size);
208 size_t init_profiled_size = MIN2(InitialCodeCacheSize, profiled_size);
209 size_t init_non_profiled_size = MIN2(InitialCodeCacheSize, non_profiled_size);
210
211 // Reserve one continuous chunk of memory for CodeHeaps and split it into
212 // parts for the individual heaps. The memory layout looks like this:
213 // ---------- high -----------
214 // Non-profiled nmethods
215 // Profiled nmethods
216 // Non-methods
217 // ---------- low ------------
218 ReservedCodeSpace rs = reserve_heap_memory(non_profiled_size + profiled_size + non_method_size);
219 ReservedSpace non_method_space = rs.first_part(non_method_size);
220 ReservedSpace rest = rs.last_part(non_method_size);
221 ReservedSpace profiled_space = rest.first_part(profiled_size);
222 ReservedSpace non_profiled_space = rest.last_part(profiled_size);
223
224 // Non-methods (stubs, adapters, ...)
225 add_heap(non_method_space, "CodeHeap 'non-methods'", init_non_method_size, CodeBlobType::NonMethod);
226 // Tier 2 and tier 3 (profiled) methods
227 add_heap(profiled_space, "CodeHeap 'profiled nmethods'", init_profiled_size, CodeBlobType::MethodProfiled);
228 // Tier 1 and tier 4 (non-profiled) methods and native methods
229 add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", init_non_profiled_size, CodeBlobType::MethodNonProfiled);
230 }
231
232 ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
233 // Determine alignment
234 const size_t page_size = os::can_execute_large_page_memory() ?
235 MIN2(os::page_size_for_region(InitialCodeCacheSize, 8),
236 os::page_size_for_region(size, 8)) :
237 os::vm_page_size();
238 const size_t granularity = os::vm_allocation_granularity();
239 const size_t r_align = MAX2(page_size, granularity);
240 const size_t r_size = align_size_up(size, r_align);
241 const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
242 MAX2(page_size, granularity);
243
244 ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
245
246 // Initialize bounds
247 _low_bound = (address)rs.base();
248 _high_bound = _low_bound + rs.size();
249
349 // Get CodeHeap for the given CodeBlobType
350 CodeHeap* heap = get_code_heap(SegmentedCodeCache ? code_blob_type : CodeBlobType::All);
351 assert (heap != NULL, "heap is null");
352
353 while (true) {
354 cb = (CodeBlob*)heap->allocate(size, is_critical);
355 if (cb != NULL) break;
356 if (!heap->expand_by(CodeCacheExpansionSize)) {
357 // Expansion failed
358 if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonMethod)) {
359 // Fallback solution: Store non-method code in the non-profiled code heap
360 return allocate(size, CodeBlobType::MethodNonProfiled, is_critical);
361 }
362 return NULL;
363 }
364 if (PrintCodeCacheExtension) {
365 ResourceMark rm;
366 if (SegmentedCodeCache) {
367 tty->print("%s", heap->name());
368 } else {
369 tty->print("CodeCache");
370 }
371 tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
372 (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
373 (address)heap->high() - (address)heap->low_boundary());
374 }
375 }
376 print_trace("allocation", cb, size);
377 _number_of_blobs++;
378 return cb;
379 }
380
381 void CodeCache::free(CodeBlob* cb) {
382 assert_locked_or_safepoint(CodeCache_lock);
383
384 print_trace("free", cb);
385 if (cb->is_nmethod()) {
386 _number_of_nmethods--;
387 if (((nmethod *)cb)->has_dependencies()) {
388 _number_of_nmethods_with_dependencies--;
389 }
803
804 void icache_init();
805
806 void CodeCache::initialize() {
807 assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
808 #ifdef COMPILER2
809 assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
810 #endif
811 assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
812 // This was originally just a check of the alignment, causing failure, instead, round
813 // the code cache to the page size. In particular, Solaris is moving to a larger
814 // default page size.
815 CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
816
817 if (SegmentedCodeCache) {
818 // Use multiple code heaps
819 initialize_heaps();
820 } else {
821 // Use a single code heap
822 ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
823 add_heap(rs, "CodeCache", InitialCodeCacheSize, CodeBlobType::All);
824 }
825
826 // Initialize ICache flush mechanism
827 // This service is needed for os::register_code_area
828 icache_init();
829
830 // Give OS a chance to register generated code area.
831 // This is used on Windows 64 bit platforms to register
832 // Structured Exception Handlers for our generated code.
833 os::register_code_area((char*)low_bound(), (char*)high_bound());
834 }
835
// Startup entry point: delegates to CodeCache::initialize() so the code
// cache exists before any code generation takes place.
void codeCache_init() {
  CodeCache::initialize();
}
839
840 //------------------------------------------------------------------------------------------------
841
// Accessor for the number of nmethods that have dependencies; the counter
// is decremented in CodeCache::free() when such an nmethod is freed.
int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
1224 }
1225 }
1226 }
1227 tty->print_cr("OopMaps");
1228 tty->print_cr(" #blobs = %d", number_of_blobs);
1229 tty->print_cr(" code size = %d", code_size);
1230 tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
1231 tty->print_cr(" map size = %d", map_size);
1232 }
1233
1234 #endif // !PRODUCT
1235 }
1236
1237 void CodeCache::print_summary(outputStream* st, bool detailed) {
1238 FOR_ALL_HEAPS(heap_iterator) {
1239 CodeHeap* heap = (*heap_iterator);
1240 size_t total = (heap->high_boundary() - heap->low_boundary());
1241 if (SegmentedCodeCache) {
1242 st->print("%s:", heap->name());
1243 } else {
1244 st->print("CodeCache:");
1245 }
1246 st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
1247 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
1248 total/K, (total - heap->unallocated_capacity())/K,
1249 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);
1250
1251 if (detailed) {
1252 st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
1253 p2i(heap->low_boundary()),
1254 p2i(heap->high()),
1255 p2i(heap->high_boundary()));
1256 }
1257 }
1258
1259 if (detailed) {
1260 st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
1261 " adapters=" UINT32_FORMAT,
1262 nof_blobs(), nof_nmethods(), nof_adapters());
1263 st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
1264 "enabled" : Arguments::mode() == Arguments::_int ?
|