src/os/windows/vm/os_windows.cpp

*** 2378,2388 ****
        //
        // 15 bytes seems to be a (very) safe value for max instruction size.
        bool pc_is_near_addr =
          (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
        bool instr_spans_page_boundary =
!         (align_size_down((intptr_t) pc ^ (intptr_t) addr,
!                          (intptr_t) page_size) > 0);
  
        if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
          static volatile address last_addr =
            (address) os::non_memory_address_word();
--- 2378,2388 ----
        //
        // 15 bytes seems to be a (very) safe value for max instruction size.
        bool pc_is_near_addr =
          (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
        bool instr_spans_page_boundary =
!         (align_down((intptr_t) pc ^ (intptr_t) addr,
!                     (intptr_t) page_size) > 0);
  
        if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
          static volatile address last_addr =
            (address) os::non_memory_address_word();
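A note on the test being touched here: XOR keeps exactly the bits in which pc and addr differ, and align_down then clears the in-page offset bits, so a nonzero result means the two addresses differ in their page number, i.e. the instruction straddles a page boundary. A minimal standalone sketch, assuming 4K pages and a power-of-two align_down like HotSpot's:

    #include <cstdint>
    #include <cassert>

    // Stand-in for HotSpot's align_down(); assumes alignment is a power of two.
    static inline intptr_t align_down(intptr_t value, intptr_t alignment) {
      return value & ~(alignment - 1);
    }

    int main() {
      const intptr_t page_size = 4096;   // assumed page size for illustration

      intptr_t pc   = 0x7000FF8;         // hypothetical instruction start
      intptr_t addr = 0x7001002;         // hypothetical access on the next page

      // Differing page-number bits survive align_down, so the result is nonzero.
      assert(align_down(pc ^ addr, page_size) > 0);

      intptr_t same_page = 0x7000FFC;    // same page as pc
      assert(align_down(pc ^ same_page, page_size) == 0);
      return 0;
    }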
*** 2390,2400 ****
        // In conservative mode, don't unguard unless the address is in the VM
        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
  
          // Set memory to RWX and retry
!         address page_start = align_ptr_down(addr, page_size);
          bool res = os::protect_memory((char*) page_start, page_size,
                                        os::MEM_PROT_RWX);
  
          log_debug(os)("Execution protection violation "
                        "at " INTPTR_FORMAT
--- 2390,2400 ----
        // In conservative mode, don't unguard unless the address is in the VM
        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
  
          // Set memory to RWX and retry
!         address page_start = align_down(addr, page_size);
          bool res = os::protect_memory((char*) page_start, page_size,
                                        os::MEM_PROT_RWX);
  
          log_debug(os)("Execution protection violation "
                        "at " INTPTR_FORMAT
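The unguard step itself is simple: round the faulting address down to its page and make that page readable, writable, and executable. A hedged Win32 sketch of that one step (unguard_page is a made-up helper, not HotSpot's os::protect_memory; VirtualProtect is the real API underneath on Windows):

    #include <windows.h>
    #include <cstdint>

    // Sketch: make the page containing 'addr' RWX. Hypothetical helper;
    // assumes page_size is a power of two.
    static bool unguard_page(void* addr, size_t page_size) {
      // align_down: clear the in-page offset bits of the address.
      void* page_start = (void*)((uintptr_t) addr & ~((uintptr_t) page_size - 1));
      DWORD old_prot;
      return VirtualProtect(page_start, page_size,
                            PAGE_EXECUTE_READWRITE, &old_prot) != 0;
    }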
*** 2767,2777 ****
    bool warn_on_failure = use_numa_interleaving_specified;
  #define WARN(msg) if (warn_on_failure) { warning(msg); }
  
    // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
    size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
!   NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);
  
    if (numa_node_list_holder.build()) {
      if (log_is_enabled(Debug, os, cpu)) {
        Log(os, cpu) log;
        log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
--- 2767,2777 ----
    bool warn_on_failure = use_numa_interleaving_specified;
  #define WARN(msg) if (warn_on_failure) { warning(msg); }
  
    // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
    size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
!   NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);
  
    if (numa_node_list_holder.build()) {
      if (log_is_enabled(Debug, os, cpu)) {
        Log(os, cpu) log;
        log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
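All the renames in this change (align_size_up/align_ptr_up to align_up, align_size_down/align_ptr_down to align_down, is_size_aligned to is_aligned) keep the same contract: round a value to the nearest multiple of a power-of-two alignment, or test for such alignment. A minimal sketch of that contract, assuming the usual mask-based implementation:

    #include <cstddef>
    #include <cassert>

    // Stand-ins for the renamed helpers; all assume a power-of-two alignment.
    static inline size_t align_up(size_t value, size_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }
    static inline size_t align_down(size_t value, size_t alignment) {
      return value & ~(alignment - 1);
    }
    static inline bool is_aligned(size_t value, size_t alignment) {
      return (value & (alignment - 1)) == 0;
    }

    int main() {
      // e.g. rounding a requested granularity up to a 64K allocation granularity
      assert(align_up(100 * 1024, 64 * 1024) == 128 * 1024);
      assert(align_up(128 * 1024, 64 * 1024) == 128 * 1024);  // already aligned
      assert(align_down(100 * 1024, 64 * 1024) == 64 * 1024);
      assert(!is_aligned(100 * 1024, 64 * 1024));
      return 0;
    }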
*** 2824,2839 ****
      os::release_memory(p_buf, bytes + chunk_size);
  
      // we still need to round up to a page boundary (in case we are using large pages)
      // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
      // instead we handle this in the bytes_to_rq computation below
!     p_buf = align_ptr_up(p_buf, page_size);
  
      // now go through and allocate one chunk at a time until all bytes are
      // allocated
      size_t bytes_remaining = bytes;
!     // An overflow of align_size_up() would have been caught above
      // in the calculation of size_of_reserve.
      char * next_alloc_addr = p_buf;
      HANDLE hProc = GetCurrentProcess();
  
  #ifdef ASSERT
--- 2824,2839 ----
      os::release_memory(p_buf, bytes + chunk_size);
  
      // we still need to round up to a page boundary (in case we are using large pages)
      // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
      // instead we handle this in the bytes_to_rq computation below
!     p_buf = align_up(p_buf, page_size);
  
      // now go through and allocate one chunk at a time until all bytes are
      // allocated
      size_t bytes_remaining = bytes;
!     // An overflow of align_up() would have been caught above
      // in the calculation of size_of_reserve.
      char * next_alloc_addr = p_buf;
      HANDLE hProc = GetCurrentProcess();
  
  #ifdef ASSERT
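For context, this hunk sits inside HotSpot's chunk-by-chunk allocator: the oversized reservation is released, p_buf is realigned, and the range is then re-reserved one interleave-granularity chunk at a time so successive chunks can land on different NUMA nodes. A hedged sketch of that loop (commit_interleaved and the node array are made up; VirtualAllocExNuma is the real API):

    #include <windows.h>

    // Sketch of NUMA-interleaved allocation over a freed address range: reserve
    // and commit one chunk at a time, rotating through the configured nodes.
    // Hypothetical helper; the real code also handles exec pages, the final
    // partial chunk, and cleanup of earlier chunks on failure.
    static bool commit_interleaved(char* base, size_t bytes, size_t chunk_size,
                                   const DWORD* nodes, int node_count) {
      HANDLE proc = GetCurrentProcess();
      int cur = 0;
      for (size_t done = 0; done < bytes; done += chunk_size) {
        size_t bytes_to_rq = (bytes - done < chunk_size) ? (bytes - done)
                                                         : chunk_size;
        void* p = VirtualAllocExNuma(proc, base + done, bytes_to_rq,
                                     MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE,
                                     nodes[cur]);
        if (p == NULL) return false;   // caller unwinds the earlier chunks
        cur = (cur + 1) % node_count;
      }
      return true;
    }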
*** 2988,2998 ****
      char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
      if (extra_base == NULL) {
        return NULL;
      }
      // Do manual alignment
!     aligned_base = align_ptr_up(extra_base, alignment);
  
      os::release_memory(extra_base, extra_size);
  
      aligned_base = os::reserve_memory(size, aligned_base);
  
--- 2988,2998 ----
      char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
      if (extra_base == NULL) {
        return NULL;
      }
      // Do manual alignment
!     aligned_base = align_up(extra_base, alignment);
  
      os::release_memory(extra_base, extra_size);
  
      aligned_base = os::reserve_memory(size, aligned_base);
  
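This is the classic over-reserve-and-align pattern: reserve size + alignment bytes, pick the first aligned address inside that range, release the whole oversized block, and re-reserve exactly at the aligned address. Windows cannot release part of a reservation, hence the release-and-re-reserve shape, and another thread can steal the address in the gap, which is why the enclosing code loops. A hedged sketch of one pass in raw Win32 terms:

    #include <windows.h>
    #include <cstdint>

    // Single pass of over-reserve-and-align; returns NULL on the race where
    // another thread grabs the address first, so callers should loop. Sketch
    // only, not HotSpot's os::reserve_memory_aligned. Assumes 'alignment' is
    // a power of two at least as large as the allocation granularity.
    static char* reserve_aligned_once(size_t size, size_t alignment) {
      size_t extra_size = size + alignment;
      char* extra_base = (char*) VirtualAlloc(NULL, extra_size,
                                              MEM_RESERVE, PAGE_READWRITE);
      if (extra_base == NULL) return NULL;

      // First aligned address inside the oversized reservation (align_up).
      char* aligned_base =
          (char*)(((uintptr_t) extra_base + alignment - 1) & ~(alignment - 1));

      // A reservation can only be released whole, so drop it and re-reserve
      // exactly at the aligned address.
      VirtualFree(extra_base, 0, MEM_RELEASE);
      return (char*) VirtualAlloc(aligned_base, size,
                                  MEM_RESERVE, PAGE_READWRITE);
    }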
*** 3057,3067 ****
  char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
                                   bool exec) {
    assert(UseLargePages, "only for large pages");
  
!   if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
      return NULL; // Fallback to small pages.
    }
  
    const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
    const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
--- 3057,3067 ----
  char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
                                   bool exec) {
    assert(UseLargePages, "only for large pages");
  
!   if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
      return NULL; // Fallback to small pages.
    }
  
    const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
    const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
*** 4040,4050 ****
  #endif
    }
  
    // If stack_commit_size is 0, windows will reserve the default size,
    // but only commit a small portion of it.
!   size_t stack_commit_size = align_size_up_((size_t)ThreadStackSize * K, (size_t)os::vm_page_size());
    size_t default_reserve_size = os::win32::default_stack_size();
    size_t actual_reserve_size = stack_commit_size;
    if (stack_commit_size < default_reserve_size) {
      // If stack_commit_size == 0, we want this too
      actual_reserve_size = default_reserve_size;
--- 4040,4050 ----
  #endif
    }
  
    // If stack_commit_size is 0, windows will reserve the default size,
    // but only commit a small portion of it.
!   size_t stack_commit_size = align_up_((size_t)ThreadStackSize * K, (size_t)os::vm_page_size());
    size_t default_reserve_size = os::win32::default_stack_size();
    size_t actual_reserve_size = stack_commit_size;
    if (stack_commit_size < default_reserve_size) {
      // If stack_commit_size == 0, we want this too
      actual_reserve_size = default_reserve_size;
*** 4058,4068 ****
    size_t min_stack_allowed =
              (size_t)(JavaThread::stack_guard_zone_size() +
                       JavaThread::stack_shadow_zone_size() +
                       (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
  
!   min_stack_allowed = align_size_up(min_stack_allowed, os::vm_page_size());
  
    if (actual_reserve_size < min_stack_allowed) {
      tty->print_cr("\nThe Java thread stack size specified is too small. "
                    "Specify at least %dk",
                    min_stack_allowed / K);
--- 4058,4068 ----
    size_t min_stack_allowed =
              (size_t)(JavaThread::stack_guard_zone_size() +
                       JavaThread::stack_shadow_zone_size() +
                       (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
  
!   min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());
  
    if (actual_reserve_size < min_stack_allowed) {
      tty->print_cr("\nThe Java thread stack size specified is too small. "
                    "Specify at least %dk",
                    min_stack_allowed / K);