
src/os/windows/vm/os_windows.cpp





2363   // code for this condition.
2364   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2365     PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2366     int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2367     address addr = (address) exceptionRecord->ExceptionInformation[1];
2368 
2369     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2370       int page_size = os::vm_page_size();
2371 
2372       // Make sure the pc and the faulting address are sane.
2373       //
2374       // If an instruction spans a page boundary, and the page containing
2375       // the beginning of the instruction is executable but the following
2376       // page is not, the pc and the faulting address might be slightly
2377       // different - we still want to unguard the 2nd page in this case.
2378       //
2379       // 15 bytes seems to be a (very) safe value for max instruction size.
2380       bool pc_is_near_addr =
2381         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2382       bool instr_spans_page_boundary =
2383         (align_size_down((intptr_t) pc ^ (intptr_t) addr,
2384                          (intptr_t) page_size) > 0);
2385 
2386       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2387         static volatile address last_addr =
2388           (address) os::non_memory_address_word();
2389 
2390         // In conservative mode, don't unguard unless the address is in the VM
2391         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2392             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2393 
2394           // Set memory to RWX and retry
2395           address page_start = align_ptr_down(addr, page_size);
2396           bool res = os::protect_memory((char*) page_start, page_size,
2397                                         os::MEM_PROT_RWX);
2398 
2399           log_debug(os)("Execution protection violation "
2400                         "at " INTPTR_FORMAT
2401                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2402                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2403 
2404           // Set last_addr so if we fault again at the same address, we don't
2405           // end up in an endless loop.
2406           //
2407           // There are two potential complications here.  Two threads trapping
2408           // at the same address at the same time could cause one of the
2409           // threads to think it already unguarded, and abort the VM.  Likely
2410           // very rare.
2411           //
2412           // The other race involves two threads alternately trapping at
2413           // different addresses and failing to unguard the page, resulting in
2414           // an endless loop.  This condition is probably even more unlikely
2415           // than the first.
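
The instr_spans_page_boundary test above relies on the fact that two addresses lie on different pages exactly when their XOR still has a bit set at or above the page-offset bits. A minimal standalone sketch of that check (plain C++, not HotSpot code; the 4K page size and sample addresses are illustrative assumptions):

#include <cstdint>
#include <cstdio>

// Returns true if a and b fall on different pages of size page_size
// (page_size must be a power of two). Clearing the low offset bits of
// a ^ b leaves a non-zero value only if the addresses differ above the
// page offset, i.e. they are on different pages.
static bool spans_page_boundary(uintptr_t a, uintptr_t b, uintptr_t page_size) {
  return ((a ^ b) & ~(page_size - 1)) != 0;
}

int main() {
  const uintptr_t page = 4096;                                // assumed page size
  printf("%d\n", spans_page_boundary(0x1FFA, 0x2003, page));  // 1: crosses into the next page
  printf("%d\n", spans_page_boundary(0x1FF0, 0x1FFA, page));  // 0: same page
  return 0;
}
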


2752   return false;
2753 }
2754 
2755 static void cleanup_after_large_page_init() {
2756   if (_hProcess) CloseHandle(_hProcess);
2757   _hProcess = NULL;
2758   if (_hToken) CloseHandle(_hToken);
2759   _hToken = NULL;
2760 }
2761 
2762 static bool numa_interleaving_init() {
2763   bool success = false;
2764   bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2765 
2766   // print a warning if UseNUMAInterleaving flag is specified on command line
2767   bool warn_on_failure = use_numa_interleaving_specified;
2768 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2769 
2770   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2771   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2772   NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);
2773 
2774   if (numa_node_list_holder.build()) {
2775     if (log_is_enabled(Debug, os, cpu)) {
2776       Log(os, cpu) log;
2777       log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2778       for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2779         log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2780       }
2781     }
2782     success = true;
2783   } else {
2784     WARN("Process does not cover multiple NUMA nodes.");
2785   }
2786   if (!success) {
2787     if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
2788   }
2789   return success;
2790 #undef WARN
2791 }
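
Rounding NUMAInterleaveGranularity up to the minimum granularity is plain round-up-to-a-multiple arithmetic for power-of-two boundaries. A small self-contained illustration (not the HotSpot align_size_up implementation itself; the 64K granularity is an assumed value for the example):

#include <cstddef>
#include <cstdio>

// Round value up to the next multiple of alignment (alignment must be a power of two).
static size_t round_up_pow2(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t granularity = 64 * 1024;                    // assumed vm_allocation_granularity
  printf("%zu\n", round_up_pow2(40 * 1024, granularity));  // 65536
  printf("%zu\n", round_up_pow2(128 * 1024, granularity)); // 131072 (already aligned)
  return 0;
}
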
2792 


2809   // so we just use 4K pages for the reserve; this gives us a legal contiguous
2810   // address space. Then we will deallocate that reservation and re-allocate
2811   // using large pages.
2812   const size_t size_of_reserve = bytes + chunk_size;
2813   if (bytes > size_of_reserve) {
2814     // Overflowed.
2815     return NULL;
2816   }
2817   p_buf = (char *) VirtualAlloc(addr,
2818                                 size_of_reserve,  // size of Reserve
2819                                 MEM_RESERVE,
2820                                 PAGE_READWRITE);
2821   // If reservation failed, return NULL
2822   if (p_buf == NULL) return NULL;
2823   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
2824   os::release_memory(p_buf, bytes + chunk_size);
2825 
2826   // We still need to round up to a page boundary (in case we are using large pages),
2827   // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size);
2828   // instead we handle this in the bytes_to_rq computation below.
2829   p_buf = align_ptr_up(p_buf, page_size);
2830 
2831   // now go through and allocate one chunk at a time until all bytes are
2832   // allocated
2833   size_t  bytes_remaining = bytes;
2834   // An overflow of align_size_up() would have been caught above
2835   // in the calculation of size_of_reserve.
2836   char * next_alloc_addr = p_buf;
2837   HANDLE hProc = GetCurrentProcess();
2838 
2839 #ifdef ASSERT
2840   // Variable for the failure injection
2841   long ran_num = os::random();
2842   size_t fail_after = ran_num % bytes;
2843 #endif
2844 
2845   int count=0;
2846   while (bytes_remaining) {
2847     // select bytes_to_rq to get to the next chunk_size boundary
2848 
2849     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
2850     // Note allocate and commit
2851     char * p_new;
2852 
2853 #ifdef ASSERT
2854     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
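
The bytes_to_rq expression above requests only enough bytes to reach the next chunk_size boundary, or everything that remains if that is less. A hedged standalone sketch of the same loop structure, with the allocation replaced by a printout (the chunk size, total, and starting address are illustrative assumptions):

#include <cstddef>
#include <cstdio>
#include <algorithm>

int main() {
  const size_t chunk_size = 64 * 1024;           // assumed interleave granularity
  size_t bytes_remaining  = 200 * 1024;          // assumed total to allocate
  size_t next_alloc_addr  = 0x10000 + 12 * 1024; // assumed start, not chunk-aligned

  while (bytes_remaining > 0) {
    // Distance to the next chunk boundary, capped by what is left.
    size_t bytes_to_rq = std::min(bytes_remaining,
                                  chunk_size - (next_alloc_addr % chunk_size));
    printf("allocate %zu bytes at 0x%zx\n", bytes_to_rq, next_alloc_addr);
    next_alloc_addr += bytes_to_rq;
    bytes_remaining -= bytes_to_rq;
  }
  return 0;
}
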


2973 
2974 // Multiple threads can race in this code, but it is not possible to unmap small sections of
2975 // virtual space to get the requested alignment, as POSIX-like OSes do.
2976 // Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
2977 char* os::reserve_memory_aligned(size_t size, size_t alignment) {
2978   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
2979          "Alignment must be a multiple of allocation granularity (page size)");
2980   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
2981 
2982   size_t extra_size = size + alignment;
2983   assert(extra_size >= size, "overflow, size is too large to allow alignment");
2984 
2985   char* aligned_base = NULL;
2986 
2987   do {
2988     char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
2989     if (extra_base == NULL) {
2990       return NULL;
2991     }
2992     // Do manual alignment
2993     aligned_base = align_ptr_up(extra_base, alignment);
2994 
2995     os::release_memory(extra_base, extra_size);
2996 
2997     aligned_base = os::reserve_memory(size, aligned_base);
2998 
2999   } while (aligned_base == NULL);
3000 
3001   return aligned_base;
3002 }
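
The reason reserving size + alignment bytes is enough: any block of that length contains an address that is alignment-aligned and still has size bytes before the block's end. A small sketch of the manual alignment step on plain numbers (no Windows calls; the values are assumptions for illustration):

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t alignment  = 0x10000;          // assumed 64K alignment
  const uintptr_t size       = 0x40000;          // assumed 256K request
  const uintptr_t extra_base = 0x7ff612345000;   // assumed unaligned reservation start

  // Same idea as align_ptr_up(extra_base, alignment) in the code above.
  uintptr_t aligned_base = (extra_base + alignment - 1) & ~(alignment - 1);

  // aligned_base + size never runs past extra_base + size + alignment,
  // because aligned_base - extra_base is always less than alignment.
  printf("aligned_base = 0x%llx, slack used = 0x%llx\n",
         (unsigned long long) aligned_base,
         (unsigned long long) (aligned_base - extra_base));
  return 0;
}
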
3003 
3004 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3005   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3006          "reserve alignment");
3007   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3008   char* res;
3009   // Note that if UseLargePages is on, all the areas that require interleaving
3010   // will go through reserve_memory_special rather than through here.
3011   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3012   if (!use_individual) {
3013     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);


3042 
3043 size_t os::large_page_size() {
3044   return _large_page_size;
3045 }
3046 
3047 bool os::can_commit_large_page_memory() {
3048   // Windows only uses large page memory when the entire region is reserved
3049   // and committed in a single VirtualAlloc() call. This may change in the
3050   // future, but with Windows 2003 it's not possible to commit on demand.
3051   return false;
3052 }
3053 
3054 bool os::can_execute_large_page_memory() {
3055   return true;
3056 }
3057 
3058 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3059                                  bool exec) {
3060   assert(UseLargePages, "only for large pages");
3061 
3062   if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3063     return NULL; // Fallback to small pages.
3064   }
3065 
3066   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3067   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3068 
3069   // with large pages, there are two cases where we need to use Individual Allocation
3070   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3071   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3072   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3073     log_debug(pagesize)("Reserving large pages individually.");
3074 
3075     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3076     if (p_buf == NULL) {
3077       // give an appropriate warning message
3078       if (UseNUMAInterleaving) {
3079         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3080       }
3081       if (UseLargePagesIndividualAllocation) {
3082         warning("Individually allocated large pages failed, "


4025 
4026   // for debugging float code generation bugs
4027   if (ForceFloatExceptions) {
4028 #ifndef  _WIN64
4029     static long fp_control_word = 0;
4030     __asm { fstcw fp_control_word }
4031     // see Intel PPro Manual, Vol. 2, p 7-16
4032     const long precision = 0x20;
4033     const long underflow = 0x10;
4034     const long overflow  = 0x08;
4035     const long zero_div  = 0x04;
4036     const long denorm    = 0x02;
4037     const long invalid   = 0x01;
4038     fp_control_word |= invalid;
4039     __asm { fldcw fp_control_word }
4040 #endif
4041   }
4042 
4043   // If stack_commit_size is 0, Windows will reserve the default size,
4044   // but only commit a small portion of it.
4045   size_t stack_commit_size = align_size_up_((size_t)ThreadStackSize * K, (size_t)os::vm_page_size());
4046   size_t default_reserve_size = os::win32::default_stack_size();
4047   size_t actual_reserve_size = stack_commit_size;
4048   if (stack_commit_size < default_reserve_size) {
4049     // If stack_commit_size == 0, we want this too
4050     actual_reserve_size = default_reserve_size;
4051   }
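
The committed-versus-reserved distinction matters because Windows thread creation takes a single stack size parameter whose meaning depends on a creation flag. A hedged standalone sketch, not the code path HotSpot itself uses for thread creation (values are arbitrary, error handling is minimal):

#include <windows.h>
#include <cstdio>

static DWORD WINAPI worker(LPVOID) {
  printf("worker running\n");
  return 0;
}

int main() {
  // With STACK_SIZE_PARAM_IS_A_RESERVATION the size is the reserve size and
  // Windows commits stack pages lazily; without the flag the size is the
  // commit size and the reserve size comes from the executable header.
  HANDLE h = CreateThread(NULL,
                          1024 * 1024,                        // 1 MB, arbitrary
                          worker, NULL,
                          STACK_SIZE_PARAM_IS_A_RESERVATION,  // interpret as reserve size
                          NULL);
  if (h == NULL) return 1;
  WaitForSingleObject(h, INFINITE);
  CloseHandle(h);
  return 0;
}
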
4052 
4053   // Check minimum allowable stack size for thread creation and to initialize
4054   // the java system classes, including StackOverflowError - depends on page
4055   // size.  Add two 4K pages for compiler2 recursion in main thread.
4056   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4057   // class initialization depending on 32 or 64 bit VM.
4058   size_t min_stack_allowed =
4059             (size_t)(JavaThread::stack_guard_zone_size() +
4060                      JavaThread::stack_shadow_zone_size() +
4061                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4062 
4063   min_stack_allowed = align_size_up(min_stack_allowed, os::vm_page_size());
4064 
4065   if (actual_reserve_size < min_stack_allowed) {
4066     tty->print_cr("\nThe Java thread stack size specified is too small. "
4067                   "Specify at least %dk",
4068                   min_stack_allowed / K);
4069     return JNI_ERR;
4070   }
4071 
4072   JavaThread::set_stack_size_at_create(stack_commit_size);
4073 
4074   // Calculate theoretical max. size of Threads to guard against artificial
4075   // out-of-memory situations, where all available address-space has been
4076   // reserved by thread stacks.
4077   assert(actual_reserve_size != 0, "Must have a stack");
4078 
4079   // Calculate the thread limit when we should start doing Virtual Memory
4080   // banging. Currently this is when the threads have used all but 200MB of space.
4081   //
4082   // TODO: consider performing a similar calculation for commit size instead
4083   // of reserve size, since on a 64-bit platform we'll run into that more




2363   // code for this condition.
2364   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2365     PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2366     int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2367     address addr = (address) exceptionRecord->ExceptionInformation[1];
2368 
2369     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2370       int page_size = os::vm_page_size();
2371 
2372       // Make sure the pc and the faulting address are sane.
2373       //
2374       // If an instruction spans a page boundary, and the page containing
2375       // the beginning of the instruction is executable but the following
2376       // page is not, the pc and the faulting address might be slightly
2377       // different - we still want to unguard the 2nd page in this case.
2378       //
2379       // 15 bytes seems to be a (very) safe value for max instruction size.
2380       bool pc_is_near_addr =
2381         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2382       bool instr_spans_page_boundary =
2383         (align_down((intptr_t) pc ^ (intptr_t) addr,
2384                          (intptr_t) page_size) > 0);
2385 
2386       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2387         static volatile address last_addr =
2388           (address) os::non_memory_address_word();
2389 
2390         // In conservative mode, don't unguard unless the address is in the VM
2391         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2392             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2393 
2394           // Set memory to RWX and retry
2395           address page_start = align_down(addr, page_size);
2396           bool res = os::protect_memory((char*) page_start, page_size,
2397                                         os::MEM_PROT_RWX);
2398 
2399           log_debug(os)("Execution protection violation "
2400                         "at " INTPTR_FORMAT
2401                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2402                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2403 
2404           // Set last_addr so if we fault again at the same address, we don't
2405           // end up in an endless loop.
2406           //
2407           // There are two potential complications here.  Two threads trapping
2408           // at the same address at the same time could cause one of the
2409           // threads to think it already unguarded, and abort the VM.  Likely
2410           // very rare.
2411           //
2412           // The other race involves two threads alternately trapping at
2413           // different addresses and failing to unguard the page, resulting in
2414           // an endless loop.  This condition is probably even more unlikely
2415           // than the first.


2752   return false;
2753 }
2754 
2755 static void cleanup_after_large_page_init() {
2756   if (_hProcess) CloseHandle(_hProcess);
2757   _hProcess = NULL;
2758   if (_hToken) CloseHandle(_hToken);
2759   _hToken = NULL;
2760 }
2761 
2762 static bool numa_interleaving_init() {
2763   bool success = false;
2764   bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2765 
2766   // print a warning if UseNUMAInterleaving flag is specified on command line
2767   bool warn_on_failure = use_numa_interleaving_specified;
2768 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2769 
2770   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2771   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2772   NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);
2773 
2774   if (numa_node_list_holder.build()) {
2775     if (log_is_enabled(Debug, os, cpu)) {
2776       Log(os, cpu) log;
2777       log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2778       for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2779         log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2780       }
2781     }
2782     success = true;
2783   } else {
2784     WARN("Process does not cover multiple NUMA nodes.");
2785   }
2786   if (!success) {
2787     if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
2788   }
2789   return success;
2790 #undef WARN
2791 }
2792 


2809   // so we just use 4K pages for the reserve; this gives us a legal contiguous
2810   // address space. Then we will deallocate that reservation and re-allocate
2811   // using large pages.
2812   const size_t size_of_reserve = bytes + chunk_size;
2813   if (bytes > size_of_reserve) {
2814     // Overflowed.
2815     return NULL;
2816   }
2817   p_buf = (char *) VirtualAlloc(addr,
2818                                 size_of_reserve,  // size of Reserve
2819                                 MEM_RESERVE,
2820                                 PAGE_READWRITE);
2821   // If reservation failed, return NULL
2822   if (p_buf == NULL) return NULL;
2823   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
2824   os::release_memory(p_buf, bytes + chunk_size);
2825 
2826   // We still need to round up to a page boundary (in case we are using large pages),
2827   // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size);
2828   // instead we handle this in the bytes_to_rq computation below.
2829   p_buf = align_up(p_buf, page_size);
2830 
2831   // now go through and allocate one chunk at a time until all bytes are
2832   // allocated
2833   size_t  bytes_remaining = bytes;
2834   // An overflow of align_up() would have been caught above
2835   // in the calculation of size_of_reserve.
2836   char * next_alloc_addr = p_buf;
2837   HANDLE hProc = GetCurrentProcess();
2838 
2839 #ifdef ASSERT
2840   // Variable for the failure injection
2841   long ran_num = os::random();
2842   size_t fail_after = ran_num % bytes;
2843 #endif
2844 
2845   int count=0;
2846   while (bytes_remaining) {
2847     // select bytes_to_rq to get to the next chunk_size boundary
2848 
2849     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
2850     // Note allocate and commit
2851     char * p_new;
2852 
2853 #ifdef ASSERT
2854     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);


2973 
2974 // Multiple threads can race in this code, but it is not possible to unmap small sections of
2975 // virtual space to get the requested alignment, as POSIX-like OSes do.
2976 // Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
2977 char* os::reserve_memory_aligned(size_t size, size_t alignment) {
2978   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
2979          "Alignment must be a multiple of allocation granularity (page size)");
2980   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
2981 
2982   size_t extra_size = size + alignment;
2983   assert(extra_size >= size, "overflow, size is too large to allow alignment");
2984 
2985   char* aligned_base = NULL;
2986 
2987   do {
2988     char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
2989     if (extra_base == NULL) {
2990       return NULL;
2991     }
2992     // Do manual alignment
2993     aligned_base = align_up(extra_base, alignment);
2994 
2995     os::release_memory(extra_base, extra_size);
2996 
2997     aligned_base = os::reserve_memory(size, aligned_base);
2998 
2999   } while (aligned_base == NULL);
3000 
3001   return aligned_base;
3002 }
3003 
3004 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3005   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3006          "reserve alignment");
3007   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3008   char* res;
3009   // Note that if UseLargePages is on, all the areas that require interleaving
3010   // will go through reserve_memory_special rather than through here.
3011   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3012   if (!use_individual) {
3013     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
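
For readers less familiar with the Win32 side, VirtualAlloc separates reserving address space from committing backing store, which is what pd_reserve_memory relies on here. A minimal standalone sketch of the reserve/commit/release cycle (error handling reduced to early returns; the 1 MB size and the assumed 4K page are arbitrary for the example):

#include <windows.h>
#include <cstdio>

int main() {
  const SIZE_T size = 1024 * 1024;   // arbitrary example size

  // Reserve address space only; no physical storage is committed yet.
  void* base = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_READWRITE);
  if (base == NULL) return 1;

  // Commit the first page (assuming 4K pages) so it can actually be touched.
  void* page = VirtualAlloc(base, 4096, MEM_COMMIT, PAGE_READWRITE);
  if (page == NULL) return 1;
  static_cast<char*>(page)[0] = 42;

  printf("reserved at %p, committed first page\n", base);

  // Release the whole reservation (size must be 0 with MEM_RELEASE).
  VirtualFree(base, 0, MEM_RELEASE);
  return 0;
}
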


3042 
3043 size_t os::large_page_size() {
3044   return _large_page_size;
3045 }
3046 
3047 bool os::can_commit_large_page_memory() {
3048   // Windows only uses large page memory when the entire region is reserved
3049   // and committed in a single VirtualAlloc() call. This may change in the
3050   // future, but with Windows 2003 it's not possible to commit on demand.
3051   return false;
3052 }
3053 
3054 bool os::can_execute_large_page_memory() {
3055   return true;
3056 }
3057 
3058 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3059                                  bool exec) {
3060   assert(UseLargePages, "only for large pages");
3061 
3062   if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3063     return NULL; // Fallback to small pages.
3064   }
3065 
3066   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3067   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3068 
3069   // with large pages, there are two cases where we need to use Individual Allocation
3070   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3071   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3072   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3073     log_debug(pagesize)("Reserving large pages individually.");
3074 
3075     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3076     if (p_buf == NULL) {
3077       // give an appropriate warning message
3078       if (UseNUMAInterleaving) {
3079         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3080       }
3081       if (UseLargePagesIndividualAllocation) {
3082         warning("Individually allocated large pages failed, "
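
By contrast, the non-individual path, and Windows large pages in general, needs the whole region reserved and committed in one call, as the can_commit_large_page_memory comment above notes. A hedged sketch of that single-call form, assuming the process already holds the "Lock pages in memory" privilege and simply reporting failure otherwise:

#include <windows.h>
#include <cstdio>

int main() {
  SIZE_T large_page = GetLargePageMinimum();   // 0 if large pages are unsupported
  if (large_page == 0) return 1;

  // One call that both reserves and commits; a MEM_LARGE_PAGES region
  // cannot be committed piecemeal later.
  void* p = VirtualAlloc(NULL, large_page,
                         MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
                         PAGE_READWRITE);
  if (p == NULL) {
    // Commonly a missing privilege or a fragmentation/resource issue.
    printf("large page allocation failed: %lu\n", GetLastError());
    return 1;
  }
  printf("got %zu-byte large page at %p\n", (size_t) large_page, p);
  VirtualFree(p, 0, MEM_RELEASE);
  return 0;
}
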


4025 
4026   // for debugging float code generation bugs
4027   if (ForceFloatExceptions) {
4028 #ifndef  _WIN64
4029     static long fp_control_word = 0;
4030     __asm { fstcw fp_control_word }
4031     // see Intel PPro Manual, Vol. 2, p 7-16
4032     const long precision = 0x20;
4033     const long underflow = 0x10;
4034     const long overflow  = 0x08;
4035     const long zero_div  = 0x04;
4036     const long denorm    = 0x02;
4037     const long invalid   = 0x01;
4038     fp_control_word |= invalid;
4039     __asm { fldcw fp_control_word }
4040 #endif
4041   }
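
Background on the control-word bits used above: in the x87 control word, bits 0 through 5 are exception masks, and a set bit masks the corresponding exception; the inline asm reads the word with fstcw and writes it back with fldcw. On MSVC the same state can also be read and changed through _controlfp. A small hedged sketch, not the code path HotSpot uses, that unmasks invalid-operation exceptions so they trap instead of quietly producing a NaN:

#include <float.h>
#include <cstdio>

int main() {
  // Read the current control word (no bits are changed when mask == 0).
  unsigned int cw = _controlfp(0, 0);
  printf("control word before: 0x%x\n", cw);

  // Clear the invalid-operation mask bit: the exception is now unmasked,
  // so an invalid operation raises a structured exception.
  _controlfp(cw & ~_EM_INVALID, _MCW_EM);
  return 0;
}
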
4042 
4043   // If stack_commit_size is 0, Windows will reserve the default size,
4044   // but only commit a small portion of it.
4045   size_t stack_commit_size = align_up_((size_t)ThreadStackSize * K, (size_t)os::vm_page_size());
4046   size_t default_reserve_size = os::win32::default_stack_size();
4047   size_t actual_reserve_size = stack_commit_size;
4048   if (stack_commit_size < default_reserve_size) {
4049     // If stack_commit_size == 0, we want this too
4050     actual_reserve_size = default_reserve_size;
4051   }
4052 
4053   // Check minimum allowable stack size for thread creation and to initialize
4054   // the java system classes, including StackOverflowError - depends on page
4055   // size.  Add two 4K pages for compiler2 recursion in main thread.
4056   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4057   // class initialization depending on 32 or 64 bit VM.
4058   size_t min_stack_allowed =
4059             (size_t)(JavaThread::stack_guard_zone_size() +
4060                      JavaThread::stack_shadow_zone_size() +
4061                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4062 
4063   min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());
4064 
4065   if (actual_reserve_size < min_stack_allowed) {
4066     tty->print_cr("\nThe Java thread stack size specified is too small. "
4067                   "Specify at least %dk",
4068                   min_stack_allowed / K);
4069     return JNI_ERR;
4070   }
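
To make the arithmetic concrete, a small sketch of the min_stack_allowed formula with placeholder zone sizes; the values are purely illustrative assumptions, since the real ones come from JavaThread and the build configuration:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t K = 1024;
  // Illustrative assumptions only.
  const size_t guard_zone   = 16 * K;   // assumed stack_guard_zone_size()
  const size_t shadow_zone  = 80 * K;   // assumed stack_shadow_zone_size()
  const size_t BytesPerWord = 8;        // 64-bit VM
  const bool   compiler2    = true;     // C2 present adds two extra 4K pages

  size_t min_stack = guard_zone + shadow_zone +
                     (4 * BytesPerWord + (compiler2 ? 2 : 0)) * 4 * K;
  printf("min stack allowed ~ %zuK\n", min_stack / K);  // 232K with these numbers
  return 0;
}
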
4071 
4072   JavaThread::set_stack_size_at_create(stack_commit_size);
4073 
4074   // Calculate theoretical max. size of Threads to guard against artificial
4075   // out-of-memory situations, where all available address-space has been
4076   // reserved by thread stacks.
4077   assert(actual_reserve_size != 0, "Must have a stack");
4078 
4079   // Calculate the thread limit when we should start doing Virtual Memory
4080   // banging. Currently this is when the threads have used all but 200MB of space.
4081   //
4082   // TODO: consider performing a similar calculation for commit size instead
4083   // of reserve size, since on a 64-bit platform we'll run into that more

