src/share/vm/code/codeCache.cpp

Old version (still using align_size_up/align_size_down):

 263   if (!heap_available(CodeBlobType::MethodNonProfiled)) {
 264     non_nmethod_size += non_profiled_size;
 265     non_profiled_size = 0;
 266   }
 267   // Make sure we have enough space for VM internal code
 268   uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
 269   if (non_nmethod_size < (min_code_cache_size + code_buffers_size)) {
 270     vm_exit_during_initialization(err_msg(
 271         "Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
 272         non_nmethod_size/K, (min_code_cache_size + code_buffers_size)/K));
 273   }
 274 
 275   // Verify sizes and update flag values
 276   assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
 277   FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size);
 278   FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
 279   FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
 280 
 281   // Align CodeHeaps
 282   size_t alignment = heap_alignment();
 283   non_nmethod_size = align_size_up(non_nmethod_size, alignment);
 284   profiled_size   = align_size_down(profiled_size, alignment);
 285 
 286   // Reserve one contiguous chunk of memory for CodeHeaps and split it into
 287   // parts for the individual heaps. The memory layout looks like this:
 288   // ---------- high -----------
 289   //    Non-profiled nmethods
 290   //      Profiled nmethods
 291   //         Non-nmethods
 292   // ---------- low ------------
 293   ReservedCodeSpace rs = reserve_heap_memory(cache_size);
 294   ReservedSpace non_method_space    = rs.first_part(non_nmethod_size);
 295   ReservedSpace rest                = rs.last_part(non_nmethod_size);
 296   ReservedSpace profiled_space      = rest.first_part(profiled_size);
 297   ReservedSpace non_profiled_space  = rest.last_part(profiled_size);
 298 
 299   // Non-nmethods (stubs, adapters, ...)
 300   add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
 301   // Tier 2 and tier 3 (profiled) methods
 302   add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
 303   // Tier 1 and tier 4 (non-profiled) methods and native methods
 304   add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
 305 }
 306 
 307 size_t CodeCache::heap_alignment() {
 308   // If large page support is enabled, align code heaps according to large
 309   // page size to make sure that code cache is covered by large pages.
 310   const size_t page_size = os::can_execute_large_page_memory() ?
 311              os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8) :
 312              os::vm_page_size();
 313   return MAX2(page_size, (size_t) os::vm_allocation_granularity());
 314 }
 315 
 316 ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
 317   // Determine alignment
 318   const size_t page_size = os::can_execute_large_page_memory() ?
 319           MIN2(os::page_size_for_region_aligned(InitialCodeCacheSize, 8),
 320                os::page_size_for_region_aligned(size, 8)) :
 321           os::vm_page_size();
 322   const size_t granularity = os::vm_allocation_granularity();
 323   const size_t r_align = MAX2(page_size, granularity);
 324   const size_t r_size = align_size_up(size, r_align);
 325   const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
 326     MAX2(page_size, granularity);
 327 
 328   ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
 329 
 330   if (!rs.is_reserved()) {
 331     vm_exit_during_initialization("Could not reserve enough space for code cache");
 332   }
 333 
 334   // Initialize bounds
 335   _low_bound = (address)rs.base();
 336   _high_bound = _low_bound + rs.size();
 337 
 338   return rs;
 339 }
 340 
 341 bool CodeCache::heap_available(int code_blob_type) {
 342   if (!SegmentedCodeCache) {
 343     // No segmentation: use a single code heap
 344     return (code_blob_type == CodeBlobType::All);

New version (align_size_up/align_size_down renamed to align_up/align_down):

 263   if (!heap_available(CodeBlobType::MethodNonProfiled)) {
 264     non_nmethod_size += non_profiled_size;
 265     non_profiled_size = 0;
 266   }
 267   // Make sure we have enough space for VM internal code
 268   uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
 269   if (non_nmethod_size < (min_code_cache_size + code_buffers_size)) {
 270     vm_exit_during_initialization(err_msg(
 271         "Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
 272         non_nmethod_size/K, (min_code_cache_size + code_buffers_size)/K));
 273   }
 274 
 275   // Verify sizes and update flag values
 276   assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
 277   FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size);
 278   FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
 279   FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
 280 
 281   // Align CodeHeaps
 282   size_t alignment = heap_alignment();
 283   non_nmethod_size = align_up(non_nmethod_size, alignment);
 284   profiled_size   = align_down(profiled_size, alignment);
 285 
 286   // Reserve one contiguous chunk of memory for CodeHeaps and split it into
 287   // parts for the individual heaps. The memory layout looks like this:
 288   // ---------- high -----------
 289   //    Non-profiled nmethods
 290   //      Profiled nmethods
 291   //         Non-nmethods
 292   // ---------- low ------------
 293   ReservedCodeSpace rs = reserve_heap_memory(cache_size);
 294   ReservedSpace non_method_space    = rs.first_part(non_nmethod_size);
 295   ReservedSpace rest                = rs.last_part(non_nmethod_size);
 296   ReservedSpace profiled_space      = rest.first_part(profiled_size);
 297   ReservedSpace non_profiled_space  = rest.last_part(profiled_size);
 298 
 299   // Non-nmethods (stubs, adapters, ...)
 300   add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
 301   // Tier 2 and tier 3 (profiled) methods
 302   add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
 303   // Tier 1 and tier 4 (non-profiled) methods and native methods
 304   add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
 305 }
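
Editor's note: the split above carves one contiguous reservation by offset,
low to high. A minimal standalone sketch of the same arithmetic (hypothetical
names, not the HotSpot ReservedSpace API):

    #include <cassert>
    #include <cstddef>

    struct Part { std::size_t offset; std::size_t size; };

    // Mirrors rs.first_part()/rs.last_part(): tile [0, cache_size) into
    // non-nmethod, profiled and non-profiled parts, in that order.
    static void split_cache(std::size_t cache_size,
                            std::size_t non_nmethod_size,
                            std::size_t profiled_size,
                            Part& non_nmethod, Part& profiled, Part& non_profiled) {
      non_nmethod  = { 0, non_nmethod_size };
      profiled     = { non_nmethod_size, profiled_size };
      non_profiled = { non_nmethod_size + profiled_size,
                       cache_size - (non_nmethod_size + profiled_size) };
      // The three parts exactly cover the reservation, matching the size
      // assert at line 276 above.
      assert(non_profiled.offset + non_profiled.size == cache_size);
    }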
 306 
 307 size_t CodeCache::heap_alignment() {
 308   // If large page support is enabled, align code heaps according to large
 309   // page size to make sure that code cache is covered by large pages.
 310   const size_t page_size = os::can_execute_large_page_memory() ?
 311              os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8) :
 312              os::vm_page_size();
 313   return MAX2(page_size, (size_t) os::vm_allocation_granularity());
 314 }
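
Editor's note: a worked example of the MAX2 above, with assumed values (2M
large pages, 64K allocation granularity; neither number comes from this file):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const std::size_t page_size   = 2u * 1024 * 1024; // large-page size, if usable
      const std::size_t granularity = 64u * 1024;       // os::vm_allocation_granularity()
      // The larger of the two wins, so each CodeHeap boundary is 2M-aligned.
      std::printf("alignment = %zuK\n", std::max(page_size, granularity) / 1024);
      return 0;
    }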
 315 
 316 ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
 317   // Determine alignment
 318   const size_t page_size = os::can_execute_large_page_memory() ?
 319           MIN2(os::page_size_for_region_aligned(InitialCodeCacheSize, 8),
 320                os::page_size_for_region_aligned(size, 8)) :
 321           os::vm_page_size();
 322   const size_t granularity = os::vm_allocation_granularity();
 323   const size_t r_align = MAX2(page_size, granularity);
 324   const size_t r_size = align_up(size, r_align);
 325   const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
 326     MAX2(page_size, granularity);
 327 
 328   ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
 329 
 330   if (!rs.is_reserved()) {
 331     vm_exit_during_initialization("Could not reserve enough space for code cache");
 332   }
 333 
 334   // Initialize bounds
 335   _low_bound = (address)rs.base();
 336   _high_bound = _low_bound + rs.size();
 337 
 338   return rs;
 339 }
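
Editor's note: align_up rounds the requested size up to the chosen alignment.
A sketch of that rounding, assuming a power-of-two alignment (which holds for
page sizes and os::vm_allocation_granularity()):

    #include <cstddef>
    #include <cstdio>

    static std::size_t align_up_pow2(std::size_t size, std::size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      // A 99M request rounded up to a 2M large-page boundary becomes 100M.
      std::printf("%zuM\n",
                  align_up_pow2(99u * 1024 * 1024, 2u * 1024 * 1024) / (1024 * 1024));
      return 0;
    }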
 340 
 341 bool CodeCache::heap_available(int code_blob_type) {
 342   if (!SegmentedCodeCache) {
 343     // No segmentation: use a single code heap
 344     return (code_blob_type == CodeBlobType::All);
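
Editor's note: the check above gates each heap on the SegmentedCodeCache
flag; when it is off, every blob type shares the single CodeBlobType::All
heap. The per-heap sizes computed ergonomically above can also be set
explicitly via -XX:NonNMethodCodeHeapSize, -XX:ProfiledCodeHeapSize and
-XX:NonProfiledCodeHeapSize.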