Print this page
rev 4525 : 8014611: reserve_and_align() assumptions are invalid on windows
Summary: also reviewed by ron.durbin@oracle.com, thomas.schatzl@oracle.com
Reviewed-by: dcubed, brutisso
Split |
Split |
Close |
Expand all |
Collapse all |
--- old/src/os/windows/vm/os_windows.cpp
+++ new/src/os/windows/vm/os_windows.cpp
1 1 /*
2 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 // Must be at least Windows 2000 or XP to use VectoredExceptions and IsDebuggerPresent
26 26 #define _WIN32_WINNT 0x500
27 27
28 28 // no precompiled headers
29 29 #include "classfile/classLoader.hpp"
30 30 #include "classfile/systemDictionary.hpp"
31 31 #include "classfile/vmSymbols.hpp"
32 32 #include "code/icBuffer.hpp"
33 33 #include "code/vtableStubs.hpp"
34 34 #include "compiler/compileBroker.hpp"
35 35 #include "interpreter/interpreter.hpp"
36 36 #include "jvm_windows.h"
37 37 #include "memory/allocation.inline.hpp"
38 38 #include "memory/filemap.hpp"
39 39 #include "mutex_windows.inline.hpp"
40 40 #include "oops/oop.inline.hpp"
41 41 #include "os_share_windows.hpp"
42 42 #include "prims/jniFastGetField.hpp"
43 43 #include "prims/jvm.h"
44 44 #include "prims/jvm_misc.hpp"
45 45 #include "runtime/arguments.hpp"
46 46 #include "runtime/extendedPC.hpp"
47 47 #include "runtime/globals.hpp"
48 48 #include "runtime/interfaceSupport.hpp"
49 49 #include "runtime/java.hpp"
50 50 #include "runtime/javaCalls.hpp"
51 51 #include "runtime/mutexLocker.hpp"
52 52 #include "runtime/objectMonitor.hpp"
53 53 #include "runtime/osThread.hpp"
54 54 #include "runtime/perfMemory.hpp"
55 55 #include "runtime/sharedRuntime.hpp"
56 56 #include "runtime/statSampler.hpp"
57 57 #include "runtime/stubRoutines.hpp"
58 58 #include "runtime/threadCritical.hpp"
59 59 #include "runtime/timer.hpp"
60 60 #include "services/attachListener.hpp"
61 61 #include "services/memTracker.hpp"
62 62 #include "services/runtimeService.hpp"
63 63 #include "thread_windows.inline.hpp"
64 64 #include "utilities/decoder.hpp"
65 65 #include "utilities/defaultStream.hpp"
66 66 #include "utilities/events.hpp"
67 67 #include "utilities/growableArray.hpp"
68 68 #include "utilities/vmError.hpp"
69 69 #ifdef TARGET_ARCH_x86
70 70 # include "assembler_x86.inline.hpp"
71 71 # include "nativeInst_x86.hpp"
72 72 #endif
73 73
74 74 #ifdef _DEBUG
75 75 #include <crtdbg.h>
76 76 #endif
77 77
78 78
79 79 #include <windows.h>
80 80 #include <sys/types.h>
81 81 #include <sys/stat.h>
82 82 #include <sys/timeb.h>
83 83 #include <objidl.h>
84 84 #include <shlobj.h>
85 85
86 86 #include <malloc.h>
87 87 #include <signal.h>
88 88 #include <direct.h>
89 89 #include <errno.h>
90 90 #include <fcntl.h>
91 91 #include <io.h>
92 92 #include <process.h> // For _beginthreadex(), _endthreadex()
93 93 #include <imagehlp.h> // For os::dll_address_to_function_name
94 94 /* for enumerating dll libraries */
95 95 #include <vdmdbg.h>
96 96
97 97 // for timer info max values which include all bits
98 98 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
99 99
100 100 // For DLL loading/load error detection
101 101 // Values of PE COFF
102 102 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
103 103 #define IMAGE_FILE_SIGNATURE_LENGTH 4
104 104
105 105 static HANDLE main_process;
106 106 static HANDLE main_thread;
107 107 static int main_thread_id;
108 108
109 109 static FILETIME process_creation_time;
110 110 static FILETIME process_exit_time;
111 111 static FILETIME process_user_time;
112 112 static FILETIME process_kernel_time;
113 113
114 114 #ifdef _WIN64
115 115 PVOID topLevelVectoredExceptionHandler = NULL;
116 116 #endif
117 117
118 118 #ifdef _M_IA64
119 119 #define __CPU__ ia64
120 120 #elif _M_AMD64
121 121 #define __CPU__ amd64
122 122 #else
123 123 #define __CPU__ i486
124 124 #endif
125 125
126 126 // save DLL module handle, used by GetModuleFileName
127 127
128 128 HINSTANCE vm_lib_handle;
129 129
130 130 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
131 131 switch (reason) {
132 132 case DLL_PROCESS_ATTACH:
133 133 vm_lib_handle = hinst;
134 134 if(ForceTimeHighResolution)
135 135 timeBeginPeriod(1L);
136 136 break;
137 137 case DLL_PROCESS_DETACH:
138 138 if(ForceTimeHighResolution)
139 139 timeEndPeriod(1L);
140 140 #ifdef _WIN64
141 141 if (topLevelVectoredExceptionHandler != NULL) {
142 142 RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
143 143 topLevelVectoredExceptionHandler = NULL;
144 144 }
145 145 #endif
146 146 break;
147 147 default:
148 148 break;
149 149 }
150 150 return true;
151 151 }
152 152
153 153 static inline double fileTimeAsDouble(FILETIME* time) {
154 154 const double high = (double) ((unsigned int) ~0);
155 155 const double split = 10000000.0;
156 156 double result = (time->dwLowDateTime / split) +
157 157 time->dwHighDateTime * (high/split);
158 158 return result;
159 159 }
160 160
161 161 // Implementation of os
162 162
163 163 bool os::getenv(const char* name, char* buffer, int len) {
164 164 int result = GetEnvironmentVariable(name, buffer, len);
165 165 return result > 0 && result < len;
166 166 }
167 167
168 168
169 169 // No setuid programs under Windows.
170 170 bool os::have_special_privileges() {
171 171 return false;
172 172 }
173 173
174 174
175 175 // This method is a periodic task to check for misbehaving JNI applications
176 176 // under CheckJNI, we can add any periodic checks here.
177 177 // For Windows at the moment does nothing
178 178 void os::run_periodic_checks() {
179 179 return;
180 180 }
181 181
182 182 #ifndef _WIN64
183 183 // previous UnhandledExceptionFilter, if there is one
184 184 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
185 185
186 186 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
187 187 #endif
188 188 void os::init_system_properties_values() {
189 189 /* sysclasspath, java_home, dll_dir */
190 190 {
191 191 char *home_path;
192 192 char *dll_path;
193 193 char *pslash;
194 194 char *bin = "\\bin";
195 195 char home_dir[MAX_PATH];
196 196
197 197 if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) {
198 198 os::jvm_path(home_dir, sizeof(home_dir));
199 199 // Found the full path to jvm[_g].dll.
200 200 // Now cut the path to <java_home>/jre if we can.
201 201 *(strrchr(home_dir, '\\')) = '\0'; /* get rid of \jvm.dll */
202 202 pslash = strrchr(home_dir, '\\');
203 203 if (pslash != NULL) {
204 204 *pslash = '\0'; /* get rid of \{client|server} */
205 205 pslash = strrchr(home_dir, '\\');
206 206 if (pslash != NULL)
207 207 *pslash = '\0'; /* get rid of \bin */
208 208 }
209 209 }
210 210
211 211 home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
212 212 if (home_path == NULL)
213 213 return;
214 214 strcpy(home_path, home_dir);
215 215 Arguments::set_java_home(home_path);
216 216
217 217 dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, mtInternal);
218 218 if (dll_path == NULL)
219 219 return;
220 220 strcpy(dll_path, home_dir);
221 221 strcat(dll_path, bin);
222 222 Arguments::set_dll_dir(dll_path);
223 223
224 224 if (!set_boot_path('\\', ';'))
225 225 return;
226 226 }
227 227
228 228 /* library_path */
229 229 #define EXT_DIR "\\lib\\ext"
230 230 #define BIN_DIR "\\bin"
231 231 #define PACKAGE_DIR "\\Sun\\Java"
232 232 {
233 233 /* Win32 library search order (See the documentation for LoadLibrary):
234 234 *
235 235 * 1. The directory from which application is loaded.
236 236 * 2. The system wide Java Extensions directory (Java only)
237 237 * 3. System directory (GetSystemDirectory)
238 238 * 4. Windows directory (GetWindowsDirectory)
239 239 * 5. The PATH environment variable
240 240 * 6. The current directory
241 241 */
242 242
243 243 char *library_path;
244 244 char tmp[MAX_PATH];
245 245 char *path_str = ::getenv("PATH");
246 246
247 247 library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
248 248 sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
249 249
250 250 library_path[0] = '\0';
251 251
252 252 GetModuleFileName(NULL, tmp, sizeof(tmp));
253 253 *(strrchr(tmp, '\\')) = '\0';
254 254 strcat(library_path, tmp);
255 255
256 256 GetWindowsDirectory(tmp, sizeof(tmp));
257 257 strcat(library_path, ";");
258 258 strcat(library_path, tmp);
259 259 strcat(library_path, PACKAGE_DIR BIN_DIR);
260 260
261 261 GetSystemDirectory(tmp, sizeof(tmp));
262 262 strcat(library_path, ";");
263 263 strcat(library_path, tmp);
264 264
265 265 GetWindowsDirectory(tmp, sizeof(tmp));
266 266 strcat(library_path, ";");
267 267 strcat(library_path, tmp);
268 268
269 269 if (path_str) {
270 270 strcat(library_path, ";");
271 271 strcat(library_path, path_str);
272 272 }
273 273
274 274 strcat(library_path, ";.");
275 275
276 276 Arguments::set_library_path(library_path);
277 277 FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
278 278 }
279 279
280 280 /* Default extensions directory */
281 281 {
282 282 char path[MAX_PATH];
283 283 char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
284 284 GetWindowsDirectory(path, MAX_PATH);
285 285 sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
286 286 path, PACKAGE_DIR, EXT_DIR);
287 287 Arguments::set_ext_dirs(buf);
288 288 }
289 289 #undef EXT_DIR
290 290 #undef BIN_DIR
291 291 #undef PACKAGE_DIR
292 292
293 293 /* Default endorsed standards directory. */
294 294 {
295 295 #define ENDORSED_DIR "\\lib\\endorsed"
296 296 size_t len = strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR);
297 297 char * buf = NEW_C_HEAP_ARRAY(char, len, mtInternal);
298 298 sprintf(buf, "%s%s", Arguments::get_java_home(), ENDORSED_DIR);
299 299 Arguments::set_endorsed_dirs(buf);
300 300 #undef ENDORSED_DIR
301 301 }
302 302
303 303 #ifndef _WIN64
304 304 // set our UnhandledExceptionFilter and save any previous one
305 305 prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
306 306 #endif
307 307
308 308 // Done
309 309 return;
310 310 }
311 311
312 312 void os::breakpoint() {
313 313 DebugBreak();
314 314 }
315 315
316 316 // Invoked from the BREAKPOINT Macro
317 317 extern "C" void breakpoint() {
318 318 os::breakpoint();
319 319 }
320 320
321 321 /*
322 322 * RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
323 323 * So far, this method is only used by Native Memory Tracking, which is
324 324 * only supported on Windows XP or later.
325 325 */
326 326 address os::get_caller_pc(int n) {
327 327 #ifdef _NMT_NOINLINE_
328 328 n ++;
329 329 #endif
330 330 address pc;
331 331 if (os::Kernel32Dll::RtlCaptureStackBackTrace(n + 1, 1, (PVOID*)&pc, NULL) == 1) {
332 332 return pc;
333 333 }
334 334 return NULL;
335 335 }
336 336
337 337
338 338 // os::current_stack_base()
339 339 //
340 340 // Returns the base of the stack, which is the stack's
341 341 // starting address. This function must be called
342 342 // while running on the stack of the thread being queried.
343 343
344 344 address os::current_stack_base() {
345 345 MEMORY_BASIC_INFORMATION minfo;
346 346 address stack_bottom;
347 347 size_t stack_size;
348 348
349 349 VirtualQuery(&minfo, &minfo, sizeof(minfo));
350 350 stack_bottom = (address)minfo.AllocationBase;
351 351 stack_size = minfo.RegionSize;
352 352
353 353 // Add up the sizes of all the regions with the same
354 354 // AllocationBase.
355 355 while( 1 )
356 356 {
357 357 VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
358 358 if ( stack_bottom == (address)minfo.AllocationBase )
359 359 stack_size += minfo.RegionSize;
360 360 else
361 361 break;
362 362 }
363 363
364 364 #ifdef _M_IA64
365 365 // IA64 has memory and register stacks
366 366 stack_size = stack_size / 2;
367 367 #endif
368 368 return stack_bottom + stack_size;
369 369 }
370 370
371 371 size_t os::current_stack_size() {
372 372 size_t sz;
373 373 MEMORY_BASIC_INFORMATION minfo;
374 374 VirtualQuery(&minfo, &minfo, sizeof(minfo));
375 375 sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
376 376 return sz;
377 377 }
378 378
379 379 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
380 380 const struct tm* time_struct_ptr = localtime(clock);
381 381 if (time_struct_ptr != NULL) {
382 382 *res = *time_struct_ptr;
383 383 return res;
384 384 }
385 385 return NULL;
386 386 }
387 387
388 388 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
389 389
390 390 // Thread start routine for all new Java threads
391 391 static unsigned __stdcall java_start(Thread* thread) {
392 392 // Try to randomize the cache line index of hot stack frames.
393 393 // This helps when threads of the same stack traces evict each other's
394 394 // cache lines. The threads can be either from the same JVM instance, or
395 395 // from different JVM instances. The benefit is especially true for
396 396 // processors with hyperthreading technology.
397 397 static int counter = 0;
398 398 int pid = os::current_process_id();
399 399 _alloca(((pid ^ counter++) & 7) * 128);
400 400
401 401 OSThread* osthr = thread->osthread();
402 402 assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
403 403
404 404 if (UseNUMA) {
405 405 int lgrp_id = os::numa_get_group_id();
406 406 if (lgrp_id != -1) {
407 407 thread->set_lgrp_id(lgrp_id);
408 408 }
409 409 }
410 410
411 411
412 412 if (UseVectoredExceptions) {
413 413 // If we are using vectored exception we don't need to set a SEH
414 414 thread->run();
415 415 }
416 416 else {
417 417 // Install a win32 structured exception handler around every thread created
418 418 // by VM, so VM can genrate error dump when an exception occurred in non-
419 419 // Java thread (e.g. VM thread).
420 420 __try {
421 421 thread->run();
422 422 } __except(topLevelExceptionFilter(
423 423 (_EXCEPTION_POINTERS*)_exception_info())) {
424 424 // Nothing to do.
425 425 }
426 426 }
427 427
428 428 // One less thread is executing
429 429 // When the VMThread gets here, the main thread may have already exited
430 430 // which frees the CodeHeap containing the Atomic::add code
431 431 if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
432 432 Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
433 433 }
434 434
435 435 return 0;
436 436 }
437 437
438 438 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, int thread_id) {
439 439 // Allocate the OSThread object
440 440 OSThread* osthread = new OSThread(NULL, NULL);
441 441 if (osthread == NULL) return NULL;
442 442
443 443 // Initialize support for Java interrupts
444 444 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
445 445 if (interrupt_event == NULL) {
446 446 delete osthread;
447 447 return NULL;
448 448 }
449 449 osthread->set_interrupt_event(interrupt_event);
450 450
451 451 // Store info on the Win32 thread into the OSThread
452 452 osthread->set_thread_handle(thread_handle);
453 453 osthread->set_thread_id(thread_id);
454 454
455 455 if (UseNUMA) {
456 456 int lgrp_id = os::numa_get_group_id();
457 457 if (lgrp_id != -1) {
458 458 thread->set_lgrp_id(lgrp_id);
459 459 }
460 460 }
461 461
462 462 // Initial thread state is INITIALIZED, not SUSPENDED
463 463 osthread->set_state(INITIALIZED);
464 464
465 465 return osthread;
466 466 }
467 467
468 468
469 469 bool os::create_attached_thread(JavaThread* thread) {
470 470 #ifdef ASSERT
471 471 thread->verify_not_published();
472 472 #endif
473 473 HANDLE thread_h;
474 474 if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
475 475 &thread_h, THREAD_ALL_ACCESS, false, 0)) {
476 476 fatal("DuplicateHandle failed\n");
477 477 }
478 478 OSThread* osthread = create_os_thread(thread, thread_h,
479 479 (int)current_thread_id());
480 480 if (osthread == NULL) {
481 481 return false;
482 482 }
483 483
484 484 // Initial thread state is RUNNABLE
485 485 osthread->set_state(RUNNABLE);
486 486
487 487 thread->set_osthread(osthread);
488 488 return true;
489 489 }
490 490
491 491 bool os::create_main_thread(JavaThread* thread) {
492 492 #ifdef ASSERT
493 493 thread->verify_not_published();
494 494 #endif
495 495 if (_starting_thread == NULL) {
496 496 _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
497 497 if (_starting_thread == NULL) {
498 498 return false;
499 499 }
500 500 }
501 501
502 502 // The primordial thread is runnable from the start)
503 503 _starting_thread->set_state(RUNNABLE);
504 504
505 505 thread->set_osthread(_starting_thread);
506 506 return true;
507 507 }
508 508
509 509 // Allocate and initialize a new OSThread
510 510 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
511 511 unsigned thread_id;
512 512
513 513 // Allocate the OSThread object
514 514 OSThread* osthread = new OSThread(NULL, NULL);
515 515 if (osthread == NULL) {
516 516 return false;
517 517 }
518 518
519 519 // Initialize support for Java interrupts
520 520 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
521 521 if (interrupt_event == NULL) {
522 522 delete osthread;
523 523 return NULL;
524 524 }
525 525 osthread->set_interrupt_event(interrupt_event);
526 526 osthread->set_interrupted(false);
527 527
528 528 thread->set_osthread(osthread);
529 529
530 530 if (stack_size == 0) {
531 531 switch (thr_type) {
532 532 case os::java_thread:
533 533 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
534 534 if (JavaThread::stack_size_at_create() > 0)
535 535 stack_size = JavaThread::stack_size_at_create();
536 536 break;
537 537 case os::compiler_thread:
538 538 if (CompilerThreadStackSize > 0) {
539 539 stack_size = (size_t)(CompilerThreadStackSize * K);
540 540 break;
541 541 } // else fall through:
542 542 // use VMThreadStackSize if CompilerThreadStackSize is not defined
543 543 case os::vm_thread:
544 544 case os::pgc_thread:
545 545 case os::cgc_thread:
546 546 case os::watcher_thread:
547 547 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
548 548 break;
549 549 }
550 550 }
551 551
552 552 // Create the Win32 thread
553 553 //
554 554 // Contrary to what MSDN document says, "stack_size" in _beginthreadex()
555 555 // does not specify stack size. Instead, it specifies the size of
556 556 // initially committed space. The stack size is determined by
557 557 // PE header in the executable. If the committed "stack_size" is larger
558 558 // than default value in the PE header, the stack is rounded up to the
559 559 // nearest multiple of 1MB. For example if the launcher has default
560 560 // stack size of 320k, specifying any size less than 320k does not
561 561 // affect the actual stack size at all, it only affects the initial
562 562 // commitment. On the other hand, specifying 'stack_size' larger than
563 563 // default value may cause significant increase in memory usage, because
564 564 // not only the stack space will be rounded up to MB, but also the
565 565 // entire space is committed upfront.
566 566 //
567 567 // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
568 568 // for CreateThread() that can treat 'stack_size' as stack size. However we
569 569 // are not supposed to call CreateThread() directly according to MSDN
570 570 // document because JVM uses C runtime library. The good news is that the
571 571 // flag appears to work with _beginthredex() as well.
572 572
573 573 #ifndef STACK_SIZE_PARAM_IS_A_RESERVATION
574 574 #define STACK_SIZE_PARAM_IS_A_RESERVATION (0x10000)
575 575 #endif
576 576
577 577 HANDLE thread_handle =
578 578 (HANDLE)_beginthreadex(NULL,
579 579 (unsigned)stack_size,
580 580 (unsigned (__stdcall *)(void*)) java_start,
581 581 thread,
582 582 CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION,
583 583 &thread_id);
584 584 if (thread_handle == NULL) {
585 585 // perhaps STACK_SIZE_PARAM_IS_A_RESERVATION is not supported, try again
586 586 // without the flag.
587 587 thread_handle =
588 588 (HANDLE)_beginthreadex(NULL,
589 589 (unsigned)stack_size,
590 590 (unsigned (__stdcall *)(void*)) java_start,
591 591 thread,
592 592 CREATE_SUSPENDED,
593 593 &thread_id);
594 594 }
595 595 if (thread_handle == NULL) {
596 596 // Need to clean up stuff we've allocated so far
597 597 CloseHandle(osthread->interrupt_event());
598 598 thread->set_osthread(NULL);
599 599 delete osthread;
600 600 return NULL;
601 601 }
602 602
603 603 Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count);
604 604
605 605 // Store info on the Win32 thread into the OSThread
606 606 osthread->set_thread_handle(thread_handle);
607 607 osthread->set_thread_id(thread_id);
608 608
609 609 // Initial thread state is INITIALIZED, not SUSPENDED
610 610 osthread->set_state(INITIALIZED);
611 611
612 612 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
613 613 return true;
614 614 }
615 615
616 616
617 617 // Free Win32 resources related to the OSThread
618 618 void os::free_thread(OSThread* osthread) {
619 619 assert(osthread != NULL, "osthread not set");
620 620 CloseHandle(osthread->thread_handle());
621 621 CloseHandle(osthread->interrupt_event());
622 622 delete osthread;
623 623 }
624 624
625 625
626 626 static int has_performance_count = 0;
627 627 static jlong first_filetime;
628 628 static jlong initial_performance_count;
629 629 static jlong performance_frequency;
630 630
631 631
632 632 jlong as_long(LARGE_INTEGER x) {
633 633 jlong result = 0; // initialization to avoid warning
634 634 set_high(&result, x.HighPart);
635 635 set_low(&result, x.LowPart);
636 636 return result;
637 637 }
638 638
639 639
640 640 jlong os::elapsed_counter() {
641 641 LARGE_INTEGER count;
642 642 if (has_performance_count) {
643 643 QueryPerformanceCounter(&count);
644 644 return as_long(count) - initial_performance_count;
645 645 } else {
646 646 FILETIME wt;
647 647 GetSystemTimeAsFileTime(&wt);
648 648 return (jlong_from(wt.dwHighDateTime, wt.dwLowDateTime) - first_filetime);
649 649 }
650 650 }
651 651
652 652
653 653 jlong os::elapsed_frequency() {
654 654 if (has_performance_count) {
655 655 return performance_frequency;
656 656 } else {
657 657 // the FILETIME time is the number of 100-nanosecond intervals since January 1,1601.
658 658 return 10000000;
659 659 }
660 660 }
661 661
662 662
663 663 julong os::available_memory() {
664 664 return win32::available_memory();
665 665 }
666 666
667 667 julong os::win32::available_memory() {
668 668 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
669 669 // value if total memory is larger than 4GB
670 670 MEMORYSTATUSEX ms;
671 671 ms.dwLength = sizeof(ms);
672 672 GlobalMemoryStatusEx(&ms);
673 673
674 674 return (julong)ms.ullAvailPhys;
675 675 }
676 676
677 677 julong os::physical_memory() {
678 678 return win32::physical_memory();
679 679 }
680 680
681 681 julong os::allocatable_physical_memory(julong size) {
682 682 #ifdef _LP64
683 683 return size;
684 684 #else
685 685 // Limit to 1400m because of the 2gb address space wall
686 686 return MIN2(size, (julong)1400*M);
687 687 #endif
688 688 }
689 689
690 690 // VC6 lacks DWORD_PTR
691 691 #if _MSC_VER < 1300
692 692 typedef UINT_PTR DWORD_PTR;
693 693 #endif
694 694
695 695 int os::active_processor_count() {
696 696 DWORD_PTR lpProcessAffinityMask = 0;
697 697 DWORD_PTR lpSystemAffinityMask = 0;
698 698 int proc_count = processor_count();
699 699 if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
700 700 GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
701 701 // Nof active processors is number of bits in process affinity mask
702 702 int bitcount = 0;
703 703 while (lpProcessAffinityMask != 0) {
704 704 lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
705 705 bitcount++;
706 706 }
707 707 return bitcount;
708 708 } else {
709 709 return proc_count;
710 710 }
711 711 }
712 712
713 713 void os::set_native_thread_name(const char *name) {
714 714 // Not yet implemented.
715 715 return;
716 716 }
717 717
718 718 bool os::distribute_processes(uint length, uint* distribution) {
719 719 // Not yet implemented.
720 720 return false;
721 721 }
722 722
723 723 bool os::bind_to_processor(uint processor_id) {
724 724 // Not yet implemented.
725 725 return false;
726 726 }
727 727
728 728 static void initialize_performance_counter() {
729 729 LARGE_INTEGER count;
730 730 if (QueryPerformanceFrequency(&count)) {
731 731 has_performance_count = 1;
732 732 performance_frequency = as_long(count);
733 733 QueryPerformanceCounter(&count);
734 734 initial_performance_count = as_long(count);
735 735 } else {
736 736 has_performance_count = 0;
737 737 FILETIME wt;
738 738 GetSystemTimeAsFileTime(&wt);
739 739 first_filetime = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
740 740 }
741 741 }
742 742
743 743
744 744 double os::elapsedTime() {
745 745 return (double) elapsed_counter() / (double) elapsed_frequency();
746 746 }
747 747
748 748
749 749 // Windows format:
750 750 // The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
751 751 // Java format:
752 752 // Java standards require the number of milliseconds since 1/1/1970
753 753
754 754 // Constant offset - calculated using offset()
755 755 static jlong _offset = 116444736000000000;
756 756 // Fake time counter for reproducible results when debugging
757 757 static jlong fake_time = 0;
758 758
759 759 #ifdef ASSERT
760 760 // Just to be safe, recalculate the offset in debug mode
761 761 static jlong _calculated_offset = 0;
762 762 static int _has_calculated_offset = 0;
763 763
764 764 jlong offset() {
765 765 if (_has_calculated_offset) return _calculated_offset;
766 766 SYSTEMTIME java_origin;
767 767 java_origin.wYear = 1970;
768 768 java_origin.wMonth = 1;
769 769 java_origin.wDayOfWeek = 0; // ignored
770 770 java_origin.wDay = 1;
771 771 java_origin.wHour = 0;
772 772 java_origin.wMinute = 0;
773 773 java_origin.wSecond = 0;
774 774 java_origin.wMilliseconds = 0;
775 775 FILETIME jot;
776 776 if (!SystemTimeToFileTime(&java_origin, &jot)) {
777 777 fatal(err_msg("Error = %d\nWindows error", GetLastError()));
778 778 }
779 779 _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
780 780 _has_calculated_offset = 1;
781 781 assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
782 782 return _calculated_offset;
783 783 }
784 784 #else
785 785 jlong offset() {
786 786 return _offset;
787 787 }
788 788 #endif
789 789
790 790 jlong windows_to_java_time(FILETIME wt) {
791 791 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
792 792 return (a - offset()) / 10000;
793 793 }
794 794
795 795 FILETIME java_to_windows_time(jlong l) {
796 796 jlong a = (l * 10000) + offset();
797 797 FILETIME result;
798 798 result.dwHighDateTime = high(a);
799 799 result.dwLowDateTime = low(a);
800 800 return result;
801 801 }
802 802
803 803 // For now, we say that Windows does not support vtime. I have no idea
804 804 // whether it can actually be made to (DLD, 9/13/05).
805 805
806 806 bool os::supports_vtime() { return false; }
807 807 bool os::enable_vtime() { return false; }
808 808 bool os::vtime_enabled() { return false; }
809 809 double os::elapsedVTime() {
810 810 // better than nothing, but not much
811 811 return elapsedTime();
812 812 }
813 813
814 814 jlong os::javaTimeMillis() {
815 815 if (UseFakeTimers) {
816 816 return fake_time++;
817 817 } else {
818 818 FILETIME wt;
819 819 GetSystemTimeAsFileTime(&wt);
820 820 return windows_to_java_time(wt);
821 821 }
822 822 }
823 823
824 824 jlong os::javaTimeNanos() {
825 825 if (!has_performance_count) {
826 826 return javaTimeMillis() * NANOSECS_PER_MILLISEC; // the best we can do.
827 827 } else {
828 828 LARGE_INTEGER current_count;
829 829 QueryPerformanceCounter(¤t_count);
830 830 double current = as_long(current_count);
831 831 double freq = performance_frequency;
832 832 jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
833 833 return time;
834 834 }
835 835 }
836 836
837 837 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
838 838 if (!has_performance_count) {
839 839 // javaTimeMillis() doesn't have much percision,
840 840 // but it is not going to wrap -- so all 64 bits
841 841 info_ptr->max_value = ALL_64_BITS;
842 842
843 843 // this is a wall clock timer, so may skip
844 844 info_ptr->may_skip_backward = true;
845 845 info_ptr->may_skip_forward = true;
846 846 } else {
847 847 jlong freq = performance_frequency;
848 848 if (freq < NANOSECS_PER_SEC) {
849 849 // the performance counter is 64 bits and we will
850 850 // be multiplying it -- so no wrap in 64 bits
851 851 info_ptr->max_value = ALL_64_BITS;
852 852 } else if (freq > NANOSECS_PER_SEC) {
853 853 // use the max value the counter can reach to
854 854 // determine the max value which could be returned
855 855 julong max_counter = (julong)ALL_64_BITS;
856 856 info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
857 857 } else {
858 858 // the performance counter is 64 bits and we will
859 859 // be using it directly -- so no wrap in 64 bits
860 860 info_ptr->max_value = ALL_64_BITS;
861 861 }
862 862
863 863 // using a counter, so no skipping
864 864 info_ptr->may_skip_backward = false;
865 865 info_ptr->may_skip_forward = false;
866 866 }
867 867 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time
868 868 }
869 869
870 870 char* os::local_time_string(char *buf, size_t buflen) {
871 871 SYSTEMTIME st;
872 872 GetLocalTime(&st);
873 873 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
874 874 st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
875 875 return buf;
876 876 }
877 877
878 878 bool os::getTimesSecs(double* process_real_time,
879 879 double* process_user_time,
880 880 double* process_system_time) {
881 881 HANDLE h_process = GetCurrentProcess();
882 882 FILETIME create_time, exit_time, kernel_time, user_time;
883 883 BOOL result = GetProcessTimes(h_process,
884 884 &create_time,
885 885 &exit_time,
886 886 &kernel_time,
887 887 &user_time);
888 888 if (result != 0) {
889 889 FILETIME wt;
890 890 GetSystemTimeAsFileTime(&wt);
891 891 jlong rtc_millis = windows_to_java_time(wt);
892 892 jlong user_millis = windows_to_java_time(user_time);
893 893 jlong system_millis = windows_to_java_time(kernel_time);
894 894 *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
895 895 *process_user_time = ((double) user_millis) / ((double) MILLIUNITS);
896 896 *process_system_time = ((double) system_millis) / ((double) MILLIUNITS);
897 897 return true;
898 898 } else {
899 899 return false;
900 900 }
901 901 }
902 902
903 903 void os::shutdown() {
904 904
905 905 // allow PerfMemory to attempt cleanup of any persistent resources
906 906 perfMemory_exit();
907 907
908 908 // flush buffered output, finish log files
909 909 ostream_abort();
910 910
911 911 // Check for abort hook
912 912 abort_hook_t abort_hook = Arguments::abort_hook();
913 913 if (abort_hook != NULL) {
914 914 abort_hook();
915 915 }
916 916 }
917 917
918 918
// Function pointer for MiniDumpWriteDump, resolved lazily from DBGHELP.DLL
// so the VM does not need a link-time dependency on that DLL.
static BOOL (WINAPI *_MiniDumpWriteDump) ( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION,
                                            PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION);

// Write a minidump of the current process next to the hs_err file when a
// crash is being reported. The dump path is built into 'buffer'; success or
// failure is communicated via VMError::report_coredump_status(). When
// exceptionRecord/contextRecord are non-NULL the dump records the faulting
// exception as well.
void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize) {
  HINSTANCE dbghelp;
  EXCEPTION_POINTERS ep;
  MINIDUMP_EXCEPTION_INFORMATION mei;
  MINIDUMP_EXCEPTION_INFORMATION* pmei;

  HANDLE hProcess = GetCurrentProcess();
  DWORD processId = GetCurrentProcessId();
  HANDLE dumpFile;
  MINIDUMP_TYPE dumpType;
  static const char* cwd;

  // If running on a client version of Windows and user has not explicitly enabled dumping
  if (!os::win32::is_windows_server() && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidumps are not enabled by default on client versions of Windows", false);
    return;
    // If running on a server version of Windows and user has explictly disabled dumping
  } else if (os::win32::is_windows_server() && !FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidump has been disabled from the command line", false);
    return;
  }

  dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);

  if (dbghelp == NULL) {
    VMError::report_coredump_status("Failed to load dbghelp.dll", false);
    return;
  }

  _MiniDumpWriteDump = CAST_TO_FN_PTR(
    BOOL(WINAPI *)( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION,
    PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION),
    GetProcAddress(dbghelp, "MiniDumpWriteDump"));

  if (_MiniDumpWriteDump == NULL) {
    VMError::report_coredump_status("Failed to find MiniDumpWriteDump() in module dbghelp.dll", false);
    return;
  }

  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData);

  // Older versions of dbghelp.h doesn't contain all the dumptypes we want, dbghelp.h with
  // API_VERSION_NUMBER 11 or higher contains the ones we want though
#if API_VERSION_NUMBER >= 11
  dumpType = (MINIDUMP_TYPE)(dumpType | MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo |
    MiniDumpWithUnloadedModules);
#endif

  // NOTE(review): get_current_directory(NULL, 0) presumably returns an
  // allocated buffer that is never freed; acceptable here since this runs
  // once during crash reporting — confirm against _getcwd semantics.
  cwd = get_current_directory(NULL, 0);
  jio_snprintf(buffer, bufferSize, "%s\\hs_err_pid%u.mdmp",cwd, current_process_id());
  dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);

  if (dumpFile == INVALID_HANDLE_VALUE) {
    VMError::report_coredump_status("Failed to create file for dumping", false);
    return;
  }
  if (exceptionRecord != NULL && contextRecord != NULL) {
    ep.ContextRecord = (PCONTEXT) contextRecord;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }


  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
    VMError::report_coredump_status("Call to MiniDumpWriteDump() failed", false);
  } else {
    VMError::report_coredump_status(buffer, true);
  }

  CloseHandle(dumpFile);
}
1001 1001
1002 1002
1003 1003
// Terminate the VM after an orderly shutdown. The dump_core argument is
// ignored here: on Windows minidumps are written by check_or_create_dump()
// from the error reporter, not by process exit.
void os::abort(bool dump_core)
{
  os::shutdown();
  // no core dump on Windows
  ::exit(1);
}
1010 1010
// Die immediately, no exit hook, no abort hook, no cleanup.
// _exit bypasses atexit handlers and C runtime cleanup entirely.
void os::die() {
  _exit(-1);
}
1015 1015
1016 1016 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1017 1017 // * dirent_md.c 1.15 00/02/02
1018 1018 //
1019 1019 // The declarations for DIR and struct dirent are in jvm_win32.h.
1020 1020
1021 1021 /* Caller must have already run dirname through JVM_NativePath, which removes
1022 1022 duplicate slashes and converts all instances of '/' into '\\'. */
1023 1023
1024 1024 DIR *
1025 1025 os::opendir(const char *dirname)
1026 1026 {
1027 1027 assert(dirname != NULL, "just checking"); // hotspot change
1028 1028 DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1029 1029 DWORD fattr; // hotspot change
1030 1030 char alt_dirname[4] = { 0, 0, 0, 0 };
1031 1031
1032 1032 if (dirp == 0) {
1033 1033 errno = ENOMEM;
1034 1034 return 0;
1035 1035 }
1036 1036
1037 1037 /*
1038 1038 * Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1039 1039 * as a directory in FindFirstFile(). We detect this case here and
1040 1040 * prepend the current drive name.
1041 1041 */
1042 1042 if (dirname[1] == '\0' && dirname[0] == '\\') {
1043 1043 alt_dirname[0] = _getdrive() + 'A' - 1;
1044 1044 alt_dirname[1] = ':';
1045 1045 alt_dirname[2] = '\\';
1046 1046 alt_dirname[3] = '\0';
1047 1047 dirname = alt_dirname;
1048 1048 }
1049 1049
1050 1050 dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1051 1051 if (dirp->path == 0) {
1052 1052 free(dirp, mtInternal);
1053 1053 errno = ENOMEM;
1054 1054 return 0;
1055 1055 }
1056 1056 strcpy(dirp->path, dirname);
1057 1057
1058 1058 fattr = GetFileAttributes(dirp->path);
1059 1059 if (fattr == 0xffffffff) {
1060 1060 free(dirp->path, mtInternal);
1061 1061 free(dirp, mtInternal);
1062 1062 errno = ENOENT;
1063 1063 return 0;
1064 1064 } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1065 1065 free(dirp->path, mtInternal);
1066 1066 free(dirp, mtInternal);
1067 1067 errno = ENOTDIR;
1068 1068 return 0;
1069 1069 }
1070 1070
1071 1071 /* Append "*.*", or possibly "\\*.*", to path */
1072 1072 if (dirp->path[1] == ':'
1073 1073 && (dirp->path[2] == '\0'
1074 1074 || (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1075 1075 /* No '\\' needed for cases like "Z:" or "Z:\" */
1076 1076 strcat(dirp->path, "*.*");
1077 1077 } else {
1078 1078 strcat(dirp->path, "\\*.*");
1079 1079 }
1080 1080
1081 1081 dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1082 1082 if (dirp->handle == INVALID_HANDLE_VALUE) {
1083 1083 if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1084 1084 free(dirp->path, mtInternal);
1085 1085 free(dirp, mtInternal);
1086 1086 errno = EACCES;
1087 1087 return 0;
1088 1088 }
1089 1089 }
1090 1090 return dirp;
1091 1091 }
1092 1092
1093 1093 /* parameter dbuf unused on Windows */
1094 1094
// POSIX-style readdir() emulation. Returns the entry fetched by the previous
// FindFirstFile/FindNextFile call and pre-fetches the next one, so the
// INVALID_HANDLE_VALUE state marks end-of-enumeration for the *next* call.
// Parameter dbuf is unused on Windows. Not thread-safe: result points into dirp.
struct dirent *
os::readdir(DIR *dirp, dirent *dbuf)
{
  assert(dirp != NULL, "just checking"); // hotspot change
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    // Enumeration exhausted (or directory was empty at opendir time).
    return 0;
  }

  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return 0;
    }
    // No more entries: close now so the next call reports end-of-dir.
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}
1116 1116
1117 1117 int
1118 1118 os::closedir(DIR *dirp)
1119 1119 {
1120 1120 assert(dirp != NULL, "just checking"); // hotspot change
1121 1121 if (dirp->handle != INVALID_HANDLE_VALUE) {
1122 1122 if (!FindClose(dirp->handle)) {
1123 1123 errno = EBADF;
1124 1124 return -1;
1125 1125 }
1126 1126 dirp->handle = INVALID_HANDLE_VALUE;
1127 1127 }
1128 1128 free(dirp->path, mtInternal);
1129 1129 free(dirp, mtInternal);
1130 1130 return 0;
1131 1131 }
1132 1132
1133 1133 // This must be hard coded because it's the system's temporary
1134 1134 // directory not the java application's temp directory, ala java.io.tmpdir.
1135 1135 const char* os::get_temp_directory() {
1136 1136 static char path_buf[MAX_PATH];
1137 1137 if (GetTempPath(MAX_PATH, path_buf)>0)
1138 1138 return path_buf;
1139 1139 else{
1140 1140 path_buf[0]='\0';
1141 1141 return path_buf;
1142 1142 }
1143 1143 }
1144 1144
1145 1145 static bool file_exists(const char* filename) {
1146 1146 if (filename == NULL || strlen(filename) == 0) {
1147 1147 return false;
1148 1148 }
1149 1149 return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
1150 1150 }
1151 1151
// Build a full DLL file name from a directory (or path list) 'pname' and a
// base name 'fname', writing "<dir>\\<fname>.dll" into 'buffer'. When pname
// is a path-separator-delimited list, the first element for which the
// resulting file exists wins; otherwise buffer holds the last candidate tried.
void os::dll_build_name(char *buffer, size_t buflen,
                        const char* pname, const char* fname) {
  const size_t pnamelen = pname ? strlen(pname) : 0;
  const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;

  // Quietly truncates on buffer overflow. Should be an error.
  if (pnamelen + strlen(fname) + 10 > buflen) {
    *buffer = '\0';
    return;
  }

  if (pnamelen == 0) {
    // No directory given: bare "<fname>.dll".
    jio_snprintf(buffer, buflen, "%s.dll", fname);
  } else if (c == ':' || c == '\\') {
    // pname already ends in a separator ("C:" or "...\\"): no extra '\\'.
    jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // pname is a search-path list: probe each element in order.
    int n;
    char** pelements = split_path(pname, &n);
    for (int i = 0 ; i < n ; i++) {
      char* path = pelements[i];
      // Really shouldn't be NULL, but check can't hurt
      size_t plen = (path == NULL) ? 0 : strlen(path);
      if (plen == 0) {
        continue; // skip the empty path values
      }
      const char lastchar = path[plen - 1];
      if (lastchar == ':' || lastchar == '\\') {
        jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
      } else {
        jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
      }
      if (file_exists(buffer)) {
        break;
      }
    }
    // release the storage allocated by split_path()
    for (int i = 0 ; i < n ; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
    }
  } else {
    // Single directory without trailing separator.
    jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
  }
}
1200 1200
// Needs to be in os specific directory because windows requires another
// header file <direct.h>
// Thin wrapper over _getcwd: with buf==NULL, _getcwd allocates the result
// (caller owns it); otherwise buf is filled in. Returns NULL on failure.
const char* os::get_current_directory(char *buf, int buflen) {
  return _getcwd(buf, buflen);
}
1206 1206
1207 1207 //-----------------------------------------------------------
1208 1208 // Helper functions for fatal error handler
1209 1209 #ifdef _WIN64
// Helper routine which returns true if address in
// within the NTDLL address space.
// Used by the fatal error handler (64-bit only). Returns false if NTDLL
// is not loaded or module information cannot be queried via PSAPI.
static bool _addr_in_ntdll( address addr )
{
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if ( hmod == NULL ) return false;
  if ( !os::PSApiDll::GetModuleInformation( GetCurrentProcess(), hmod,
                                            &minfo, sizeof(MODULEINFO)) )
    return false;

  // Half-open range check: [base, base + SizeOfImage)
  if ( (addr >= minfo.lpBaseOfDll) &&
       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage)))
    return true;
  else
    return false;
}
1230 1230 #endif
1231 1231
1232 1232
1233 1233 // Enumerate all modules for a given process ID
1234 1234 //
1235 1235 // Notice that Windows 95/98/Me and Windows NT/2000/XP have
1236 1236 // different API for doing this. We use PSAPI.DLL on NT based
1237 1237 // Windows and ToolHelp on 95/98/Me.
1238 1238
1239 1239 // Callback function that is called by enumerate_modules() on
1240 1240 // every DLL module.
1241 1241 // Input parameters:
1242 1242 // int pid,
1243 1243 // char* module_file_name,
1244 1244 // address module_base_addr,
1245 1245 // unsigned module_size,
1246 1246 // void* param
1247 1247 typedef int (*EnumModulesCallbackFunc)(int, char *, address, unsigned, void *);
1248 1248
// enumerate_modules for Windows NT, using PSAPI.
// Invokes 'func' for every loaded module of process 'pid' until the callback
// returns non-zero; returns that value (0 if enumeration ran to completion
// or PSAPI is unavailable). NOTE: uses a function-local static 'filename'
// buffer, so this is not reentrant/thread-safe — acceptable for its error-
// reporting callers.
static int _enumerate_modules_winnt( int pid, EnumModulesCallbackFunc func, void * param)
{
  HANDLE hProcess ;

# define MAX_NUM_MODULES 128
  HMODULE modules[MAX_NUM_MODULES];
  static char filename[ MAX_PATH ];
  int result = 0;

  if (!os::PSApiDll::PSApiAvailable()) {
    return 0;
  }

  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid ) ;
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!os::PSApiDll::EnumProcessModules(hProcess, modules,
                                        sizeof(modules), &size_needed)) {
    CloseHandle( hProcess );
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  // Clamp to our fixed-size array; modules beyond MAX_NUM_MODULES are ignored.
  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if(!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i],
                                          filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!os::PSApiDll::GetModuleInformation(hProcess, modules[i],
                                            &modinfo, sizeof(modinfo))) {
      // Keep enumerating with a zeroed range rather than failing outright.
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = func(pid, filename, (address)modinfo.lpBaseOfDll,
                  modinfo.SizeOfImage, param);
    if (result) break;
  }

  CloseHandle( hProcess ) ;
  return result;
}
1300 1300
1301 1301
// enumerate_modules for Windows 95/98/ME, using TOOLHELP.
// Walks a Toolhelp32 snapshot of the process's modules, invoking 'func' for
// each until it returns non-zero; returns that value (0/FALSE on failure or
// completion). NOTE: 'modentry' is a function-local static — not reentrant.
static int _enumerate_modules_windows( int pid, EnumModulesCallbackFunc func, void *param)
{
  HANDLE hSnapShot ;
  static MODULEENTRY32 modentry ;
  int result = 0;

  if (!os::Kernel32Dll::HelpToolsAvailable()) {
    return 0;
  }

  // Get a handle to a Toolhelp snapshot of the system
  hSnapShot = os::Kernel32Dll::CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pid ) ;
  if( hSnapShot == INVALID_HANDLE_VALUE ) {
    return FALSE ;
  }

  // iterate through all modules
  modentry.dwSize = sizeof(MODULEENTRY32) ;
  bool not_done = os::Kernel32Dll::Module32First( hSnapShot, &modentry ) != 0;

  while( not_done ) {
    // invoke the callback
    result=func(pid, modentry.szExePath, (address)modentry.modBaseAddr,
                modentry.modBaseSize, param);
    if (result) break;

    // dwSize must be reset before each Module32Next call.
    modentry.dwSize = sizeof(MODULEENTRY32) ;
    not_done = os::Kernel32Dll::Module32Next( hSnapShot, &modentry ) != 0;
  }

  CloseHandle(hSnapShot);
  return result;
}
1336 1336
1337 1337 int enumerate_modules( int pid, EnumModulesCallbackFunc func, void * param )
1338 1338 {
1339 1339 // Get current process ID if caller doesn't provide it.
1340 1340 if (!pid) pid = os::current_process_id();
1341 1341
1342 1342 if (os::win32::is_nt()) return _enumerate_modules_winnt (pid, func, param);
1343 1343 else return _enumerate_modules_windows(pid, func, param);
1344 1344 }
1345 1345
// In/out parameter block for _locate_module_by_addr: 'addr' is the query
// address (in); 'full_path'/'buflen' describe an optional output buffer for
// the module path; 'base_addr' receives the containing module's base (out).
struct _modinfo {
  address addr;
  char* full_path; // point to a char buffer
  int buflen; // size of the buffer
  address base_addr;
};
1352 1352
1353 1353 static int _locate_module_by_addr(int pid, char * mod_fname, address base_addr,
1354 1354 unsigned size, void * param) {
1355 1355 struct _modinfo *pmod = (struct _modinfo *)param;
1356 1356 if (!pmod) return -1;
1357 1357
1358 1358 if (base_addr <= pmod->addr &&
1359 1359 base_addr+size > pmod->addr) {
1360 1360 // if a buffer is provided, copy path name to the buffer
1361 1361 if (pmod->full_path) {
1362 1362 jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1363 1363 }
1364 1364 pmod->base_addr = base_addr;
1365 1365 return 1;
1366 1366 }
1367 1367 return 0;
1368 1368 }
1369 1369
// Map 'addr' to the full path of the containing DLL (copied into buf) and
// its offset from the module base. Returns false — with buf emptied and
// *offset set to -1 — if no loaded module contains the address.
bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
  // return the full path to the DLL file, sometimes it returns path
  // to the corresponding PDB file (debug info); sometimes it only
  // returns partial path, which makes life painful.

  struct _modinfo mi;
  mi.addr = addr;
  mi.full_path = buf;
  mi.buflen = buflen;
  int pid = os::current_process_id();
  if (enumerate_modules(pid, _locate_module_by_addr, (void *)&mi)) {
    // buf already contains path name
    if (offset) *offset = addr - mi.base_addr;
    return true;
  } else {
    if (buf) buf[0] = '\0';
    if (offset) *offset = -1;
    return false;
  }
}
1392 1392
1393 1393 bool os::dll_address_to_function_name(address addr, char *buf,
1394 1394 int buflen, int *offset) {
1395 1395 if (Decoder::decode(addr, buf, buflen, offset)) {
1396 1396 return true;
1397 1397 }
1398 1398 if (offset != NULL) *offset = -1;
1399 1399 if (buf != NULL) buf[0] = '\0';
1400 1400 return false;
1401 1401 }
1402 1402
1403 1403 // save the start and end address of jvm.dll into param[0] and param[1]
1404 1404 static int _locate_jvm_dll(int pid, char* mod_fname, address base_addr,
1405 1405 unsigned size, void * param) {
1406 1406 if (!param) return -1;
1407 1407
1408 1408 if (base_addr <= (address)_locate_jvm_dll &&
1409 1409 base_addr+size > (address)_locate_jvm_dll) {
1410 1410 ((address*)param)[0] = base_addr;
1411 1411 ((address*)param)[1] = base_addr + size;
1412 1412 return 1;
1413 1413 }
1414 1414 return 0;
1415 1415 }
1416 1416
address vm_lib_location[2]; // start and end address of jvm.dll

// check if addr is inside jvm.dll
// Lazily initializes vm_lib_location on first use by enumerating loaded
// modules; asserts (debug builds) if jvm.dll cannot be located.
bool os::address_is_in_vm(address addr) {
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    int pid = os::current_process_id();
    if (!enumerate_modules(pid, _locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  // Half-open range test: [start, end)
  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}
1431 1431
1432 1432 // print module info; param is outputStream*
1433 1433 static int _print_module(int pid, char* fname, address base,
1434 1434 unsigned size, void* param) {
1435 1435 if (!param) return -1;
1436 1436
1437 1437 outputStream* st = (outputStream*)param;
1438 1438
1439 1439 address end_addr = base + size;
1440 1440 st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base, end_addr, fname);
1441 1441 return 0;
1442 1442 }
1443 1443
// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on.
// Returns the library handle, or NULL with a human-readable reason in ebuf.
// The architecture check parses the PE header of the file directly.
void * os::dll_load(const char *name, char *ebuf, int ebuflen)
{
  void * result = LoadLibrary(name);
  if (result != NULL)
  {
    return result;
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen-1);
    ebuf[ebuflen-1]='\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen-1]='\0';
  int file_descriptor=::open(name, O_RDONLY | O_BINARY, 0);
  if (file_descriptor<0)
  {
    return NULL;
  }

  uint32_t signature_offset;
  uint16_t lib_arch=0;
  bool failed_to_get_lib_arch=
  (
    //Go to position 3c in the dll
    (os::seek_to_file_offset(file_descriptor,IMAGE_FILE_PTR_TO_SIGNATURE)<0)
    ||
    // Read location of the PE signature
    (sizeof(signature_offset)!=
      (os::read(file_descriptor, (void*)&signature_offset,sizeof(signature_offset))))
    ||
    //Go to COFF File Header in dll
    //that is located after"signature" (4 bytes long)
    (os::seek_to_file_offset(file_descriptor,
      signature_offset+IMAGE_FILE_SIGNATURE_LENGTH)<0)
    ||
    //Read field that contains code of architecture
    // that dll was built for
    (sizeof(lib_arch)!=
      (os::read(file_descriptor, (void*)&lib_arch,sizeof(lib_arch))))
  );

  ::close(file_descriptor);
  if (failed_to_get_lib_arch)
  {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  typedef struct
  {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[]={
    {IMAGE_FILE_MACHINE_I386, (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64, (char*)"AMD 64"},
    {IMAGE_FILE_MACHINE_IA64, (char*)"IA 64"}
  };
#if (defined _M_IA64)
  static const uint16_t running_arch=IMAGE_FILE_MACHINE_IA64;
#elif (defined _M_AMD64)
  static const uint16_t running_arch=IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch=IMAGE_FILE_MACHINE_I386;
#else
  #error Method os::dll_load requires that one of following \
  is defined :_M_IA64,_M_AMD64 or _M_IX86
#endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str=NULL,*lib_arch_str=NULL;
  for (unsigned int i=0;i<ARRAY_SIZE(arch_array);i++)
  {
    if (lib_arch==arch_array[i].arch_code)
      lib_arch_str=arch_array[i].arch_name;
    if (running_arch==arch_array[i].arch_code)
      running_arch_str=arch_array[i].arch_name;
  }

  assert(running_arch_str,
    "Didn't find running architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch)
  {
    return NULL;
  }

  if (lib_arch_str!=NULL)
  {
    ::_snprintf(ebuf, ebuflen-1,
      "Can't load %s-bit .dll on a %s-bit platform",
      lib_arch_str,running_arch_str);
  }
  else
  {
    // don't know what architecture this dll was built for
    ::_snprintf(ebuf, ebuflen-1,
      "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
      lib_arch,running_arch_str);
  }

  return NULL;
}
1568 1568
1569 1569
1570 1570 void os::print_dll_info(outputStream *st) {
1571 1571 int pid = os::current_process_id();
1572 1572 st->print_cr("Dynamic libraries:");
1573 1573 enumerate_modules(pid, _print_module, (void *)st);
1574 1574 }
1575 1575
// On Windows the brief OS description is the same as the full one.
void os::print_os_info_brief(outputStream* st) {
  os::print_os_info(st);
}
1579 1579
// Print "OS:" followed by the detected Windows version string.
void os::print_os_info(outputStream* st) {
  st->print("OS:");

  os::win32::print_windows_version(st);
}
1585 1585
1586 1586 void os::win32::print_windows_version(outputStream* st) {
1587 1587 OSVERSIONINFOEX osvi;
1588 1588 ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1589 1589 osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1590 1590
1591 1591 if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1592 1592 st->print_cr("N/A");
1593 1593 return;
1594 1594 }
1595 1595
1596 1596 int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion;
1597 1597 if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) {
1598 1598 switch (os_vers) {
1599 1599 case 3051: st->print(" Windows NT 3.51"); break;
1600 1600 case 4000: st->print(" Windows NT 4.0"); break;
1601 1601 case 5000: st->print(" Windows 2000"); break;
1602 1602 case 5001: st->print(" Windows XP"); break;
1603 1603 case 5002:
1604 1604 case 6000:
1605 1605 case 6001:
1606 1606 case 6002: {
1607 1607 // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
1608 1608 // find out whether we are running on 64 bit processor or not.
1609 1609 SYSTEM_INFO si;
1610 1610 ZeroMemory(&si, sizeof(SYSTEM_INFO));
1611 1611 if (!os::Kernel32Dll::GetNativeSystemInfoAvailable()){
1612 1612 GetSystemInfo(&si);
1613 1613 } else {
1614 1614 os::Kernel32Dll::GetNativeSystemInfo(&si);
1615 1615 }
1616 1616 if (os_vers == 5002) {
1617 1617 if (osvi.wProductType == VER_NT_WORKSTATION &&
1618 1618 si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
1619 1619 st->print(" Windows XP x64 Edition");
1620 1620 else
1621 1621 st->print(" Windows Server 2003 family");
1622 1622 } else if (os_vers == 6000) {
1623 1623 if (osvi.wProductType == VER_NT_WORKSTATION)
1624 1624 st->print(" Windows Vista");
1625 1625 else
1626 1626 st->print(" Windows Server 2008");
1627 1627 if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
1628 1628 st->print(" , 64 bit");
1629 1629 } else if (os_vers == 6001) {
1630 1630 if (osvi.wProductType == VER_NT_WORKSTATION) {
1631 1631 st->print(" Windows 7");
1632 1632 } else {
1633 1633 // Unrecognized windows, print out its major and minor versions
1634 1634 st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
1635 1635 }
1636 1636 if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
1637 1637 st->print(" , 64 bit");
1638 1638 } else if (os_vers == 6002) {
1639 1639 if (osvi.wProductType == VER_NT_WORKSTATION) {
1640 1640 st->print(" Windows 8");
1641 1641 } else {
1642 1642 st->print(" Windows Server 2012");
1643 1643 }
1644 1644 if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
1645 1645 st->print(" , 64 bit");
1646 1646 } else { // future os
1647 1647 // Unrecognized windows, print out its major and minor versions
1648 1648 st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
1649 1649 if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
1650 1650 st->print(" , 64 bit");
1651 1651 }
1652 1652 break;
1653 1653 }
1654 1654 default: // future windows, print out its major and minor versions
1655 1655 st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
1656 1656 }
1657 1657 } else {
1658 1658 switch (os_vers) {
1659 1659 case 4000: st->print(" Windows 95"); break;
1660 1660 case 4010: st->print(" Windows 98"); break;
1661 1661 case 4090: st->print(" Windows Me"); break;
1662 1662 default: // future windows, print out its major and minor versions
1663 1663 st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
1664 1664 }
1665 1665 }
1666 1666 st->print(" Build %d", osvi.dwBuildNumber);
1667 1667 st->print(" %s", osvi.szCSDVersion); // service pack
1668 1668 st->cr();
1669 1669 }
1670 1670
// Platform-specific CPU details: intentionally empty on Windows for now;
// the shared os::print_cpu_info() handles the generic part.
void os::pd_print_cpu_info(outputStream* st) {
  // Nothing to do for now.
}
1674 1674
1675 1675 void os::print_memory_info(outputStream* st) {
1676 1676 st->print("Memory:");
1677 1677 st->print(" %dk page", os::vm_page_size()>>10);
1678 1678
1679 1679 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
1680 1680 // value if total memory is larger than 4GB
1681 1681 MEMORYSTATUSEX ms;
1682 1682 ms.dwLength = sizeof(ms);
1683 1683 GlobalMemoryStatusEx(&ms);
1684 1684
1685 1685 st->print(", physical %uk", os::physical_memory() >> 10);
1686 1686 st->print("(%uk free)", os::available_memory() >> 10);
1687 1687
1688 1688 st->print(", swap %uk", ms.ullTotalPageFile >> 10);
1689 1689 st->print("(%uk free)", ms.ullAvailPageFile >> 10);
1690 1690 st->cr();
1691 1691 }
1692 1692
// Pretty-print a Windows EXCEPTION_RECORD (passed as 'siginfo') for the
// hs_err report: exception code, read/write direction and faulting address
// for access violations, and a CDS-archive hint for in-page errors.
void os::print_siginfo(outputStream *st, void *siginfo) {
  EXCEPTION_RECORD* er = (EXCEPTION_RECORD*)siginfo;
  st->print("siginfo:");
  st->print(" ExceptionCode=0x%x", er->ExceptionCode);

  if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      er->NumberParameters >= 2) {
    // ExceptionInformation[0]: 0 = read fault, 1 = write fault;
    // ExceptionInformation[1]: the inaccessible address.
    switch (er->ExceptionInformation[0]) {
    case 0: st->print(", reading address"); break;
    case 1: st->print(", writing address"); break;
    default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
                       er->ExceptionInformation[0]);
    }
    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
  } else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR &&
             er->NumberParameters >= 2 && UseSharedSpaces) {
    // An in-page error touching the CDS archive usually means the mapped
    // file became unreadable (disk/network problem); say so explicitly.
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) {
      st->print("\n\nError accessing class data sharing archive." \
                " Mapped file inaccessible during execution, " \
                " possible disk/network problem.");
    }
  } else {
    // Generic fallback: dump all exception parameters.
    int num = er->NumberParameters;
    if (num > 0) {
      st->print(", ExceptionInformation=");
      for (int i = 0; i < num; i++) {
        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
      }
    }
  }
  st->cr();
}
1726 1726
// No POSIX-style signal handlers to report on Windows; intentionally empty.
void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  // do nothing
}
1730 1730
1731 1731 static char saved_jvm_path[MAX_PATH] = {0};
1732 1732
1733 1733 // Find the full path to the current module, jvm.dll or jvm_g.dll
1734 1734 void os::jvm_path(char *buf, jint buflen) {
1735 1735 // Error checking.
1736 1736 if (buflen < MAX_PATH) {
1737 1737 assert(false, "must use a large-enough buffer");
1738 1738 buf[0] = '\0';
1739 1739 return;
1740 1740 }
1741 1741 // Lazy resolve the path to current module.
1742 1742 if (saved_jvm_path[0] != 0) {
1743 1743 strcpy(buf, saved_jvm_path);
1744 1744 return;
1745 1745 }
1746 1746
1747 1747 buf[0] = '\0';
1748 1748 if (Arguments::created_by_gamma_launcher()) {
1749 1749 // Support for the gamma launcher. Check for an
1750 1750 // JAVA_HOME environment variable
1751 1751 // and fix up the path so it looks like
1752 1752 // libjvm.so is installed there (append a fake suffix
1753 1753 // hotspot/libjvm.so).
1754 1754 char* java_home_var = ::getenv("JAVA_HOME");
1755 1755 if (java_home_var != NULL && java_home_var[0] != 0) {
1756 1756
1757 1757 strncpy(buf, java_home_var, buflen);
1758 1758
1759 1759 // determine if this is a legacy image or modules image
1760 1760 // modules image doesn't have "jre" subdirectory
1761 1761 size_t len = strlen(buf);
1762 1762 char* jrebin_p = buf + len;
1763 1763 jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1764 1764 if (0 != _access(buf, 0)) {
1765 1765 jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1766 1766 }
1767 1767 len = strlen(buf);
1768 1768 jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1769 1769 }
1770 1770 }
1771 1771
1772 1772 if(buf[0] == '\0') {
1773 1773 GetModuleFileName(vm_lib_handle, buf, buflen);
1774 1774 }
1775 1775 strcpy(saved_jvm_path, buf);
1776 1776 }
1777 1777
1778 1778
// Print the platform JNI symbol-name prefix: 32-bit Windows __stdcall
// exports are decorated with a leading underscore; 64-bit uses none.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("_");
#endif
}
1784 1784
1785 1785
1786 1786 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1787 1787 #ifndef _WIN64
1788 1788 st->print("@%d", args_size * sizeof(int));
1789 1789 #endif
1790 1790 }
1791 1791
1792 1792 // This method is a copy of JDK's sysGetLastErrorString
1793 1793 // from src/windows/hpi/src/system_md.c
1794 1794
1795 1795 size_t os::lasterror(char* buf, size_t len) {
1796 1796 DWORD errval;
1797 1797
1798 1798 if ((errval = GetLastError()) != 0) {
1799 1799 // DOS error
1800 1800 size_t n = (size_t)FormatMessage(
1801 1801 FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1802 1802 NULL,
1803 1803 errval,
1804 1804 0,
1805 1805 buf,
1806 1806 (DWORD)len,
1807 1807 NULL);
1808 1808 if (n > 3) {
1809 1809 // Drop final '.', CR, LF
1810 1810 if (buf[n - 1] == '\n') n--;
1811 1811 if (buf[n - 1] == '\r') n--;
1812 1812 if (buf[n - 1] == '.') n--;
1813 1813 buf[n] = '\0';
1814 1814 }
1815 1815 return n;
1816 1816 }
1817 1817
1818 1818 if (errno != 0) {
1819 1819 // C runtime error that has no corresponding DOS error code
1820 1820 const char* s = strerror(errno);
1821 1821 size_t n = strlen(s);
1822 1822 if (n >= len) n = len - 1;
1823 1823 strncpy(buf, s, n);
1824 1824 buf[n] = '\0';
1825 1825 return n;
1826 1826 }
1827 1827
1828 1828 return 0;
1829 1829 }
1830 1830
1831 1831 int os::get_last_error() {
1832 1832 DWORD error = GetLastError();
1833 1833 if (error == 0)
1834 1834 error = errno;
1835 1835 return (int)error;
1836 1836 }
1837 1837
// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
// See bug 4416763.
// Saved user-installed SIGBREAK handler; invoked from consoleHandler on
// CTRL_BREAK instead of being registered with the C runtime directly.
static void (*sigbreakHandler)(int) = NULL;
1844 1844
// Trampoline installed as the handler for user signals: forwards the
// signal to the VM's pending-signal machinery, then re-arms itself.
static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
  os::signal(sig, (void*)UserHandler);
}
1850 1850
// Expose the VM's user-signal trampoline so callers can install or
// recognize it.
void* os::user_handler() {
  return (void*) UserHandler;
}
1854 1854
1855 1855 void* os::signal(int signal_number, void* handler) {
1856 1856 if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1857 1857 void (*oldHandler)(int) = sigbreakHandler;
1858 1858 sigbreakHandler = (void (*)(int)) handler;
1859 1859 return (void*) oldHandler;
1860 1860 } else {
1861 1861 return (void*)::signal(signal_number, (void (*)(int))handler);
1862 1862 }
1863 1863 }
1864 1864
// Raise the given signal in the current process via the C runtime.
void os::signal_raise(int signal_number) {
  raise(signal_number);
}
1868 1868
// The Win32 C runtime library maps all console control events other than ^C
// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
// logoff, and shutdown events. We therefore install our own console handler
// that raises SIGTERM for the latter cases.
//
static BOOL WINAPI consoleHandler(DWORD event) {
  switch(event) {
    case CTRL_C_EVENT:
      if (is_error_reported()) {
        // Ctrl-C is pressed during error reporting, likely because the error
        // handler fails to abort. Let VM die immediately.
        os::die();
      }

      os::signal_raise(SIGINT);
      return TRUE;
      break;
    case CTRL_BREAK_EVENT:
      // Forward ^BREAK to the user-installed SIGBREAK handler, if any
      // (see os::signal above).
      if (sigbreakHandler != NULL) {
        (*sigbreakHandler)(SIGBREAK);
      }
      return TRUE;
      break;
    case CTRL_LOGOFF_EVENT: {
      // Don't terminate JVM if it is running in a non-interactive session,
      // such as a service process.
      USEROBJECTFLAGS flags;
      HANDLE handle = GetProcessWindowStation();
      if (handle != NULL &&
          GetUserObjectInformation(handle, UOI_FLAGS, &flags,
            sizeof( USEROBJECTFLAGS), NULL)) {
        // If it is a non-interactive session, let the next handler deal
        // with it.
        if ((flags.dwFlags & WSF_VISIBLE) == 0) {
          return FALSE;
        }
      }
    }
    // NOTE: deliberate fallthrough — an interactive logoff is treated
    // exactly like a close/shutdown event.
    case CTRL_CLOSE_EVENT:
    case CTRL_SHUTDOWN_EVENT:
      os::signal_raise(SIGTERM);
      return TRUE;
      break;
    default:
      break;
  }
  return FALSE;
}
1917 1917
/*
 * The following code is moved from os.cpp for making this
 * code platform specific, which it is by its very nature.
 */

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
// (NSIG is one past the largest real signal number, so it cannot collide
// with any actual signal; pending_signals below reserves a slot for it.)
int os::sigexitnum_pd(){
  return NSIG;
}
1928 1928
// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
// Counting semaphore: released once per signal_notify(), waited on in
// check_pending_signals(). NULL until signal_init_pd() runs.
static HANDLE sig_sem = NULL;
1932 1932
// Platform-dependent part of signal initialization: set up the pending-signal
// counters, the wakeup semaphore, and (unless -Xrs) the console handler.
void os::signal_init_pd() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  // NOTE(review): the CreateSemaphore result is not checked here; if it
  // fails, sig_sem stays NULL and signal_notify silently drops signals —
  // confirm this is the intended degradation.
  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3. For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified. This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case. See bugs 4323062, 4345157, and related bugs.

  if (!ReduceSignalUsage) {
    // Add a CTRL-C handler
    SetConsoleCtrlHandler(consoleHandler, TRUE);
  }
}
1958 1958
// Record that 'signal_number' is pending and wake one waiter in
// check_pending_signals(). If sig_sem is still NULL (signal_init_pd has
// not run), the notification is dropped.
void os::signal_notify(int signal_number) {
  BOOL ret;
  if (sig_sem != NULL) {
    // Bump the counter first, then post the semaphore so the waiter is
    // guaranteed to find a nonzero count.
    Atomic::inc(&pending_signals[signal_number]);
    ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
    assert(ret != 0, "ReleaseSemaphore() failed");
  }
}
1967 1967
// Dequeue one pending signal number. With wait_for_signal == false this is
// a non-blocking poll that returns -1 when nothing is pending; with true it
// blocks on sig_sem, cooperating with the external-suspend protocol while
// waiting. Must be called on a JavaThread when wait_for_signal is true.
static int check_pending_signals(bool wait_for_signal) {
  DWORD ret;
  while (true) {
    // Fast path: try to claim an already-pending signal via CAS decrement.
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }

    JavaThread *thread = JavaThread::current();

    // Transition to blocked so safepoints are not held up while we wait.
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ret = ::WaitForSingleObject(sig_sem, INFINITE);
      assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        // Re-post the token we consumed so no wakeup is lost, then suspend.
        ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
        assert(ret != 0, "ReleaseSemaphore() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
2009 2009
// Non-blocking poll for a pending signal; returns -1 if none is pending.
int os::signal_lookup() {
  return check_pending_signals(false);
}
2013 2013
// Block until a signal is pending, then return its number.
int os::signal_wait() {
  return check_pending_signals(true);
}
2017 2017
// Implicit OS exception handling

// Redirect the faulting thread to 'handler': stash the faulting pc in the
// thread (so continuation stubs can retrieve it) and rewrite the context's
// instruction pointer. Returning EXCEPTION_CONTINUE_EXECUTION makes the OS
// resume the thread at the new pc.
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, address handler) {
  JavaThread* thread = JavaThread::current();
  // Save pc in thread
#ifdef _M_IA64
  thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->StIIP);
  // Set pc to handler
  exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
#elif _M_AMD64
  thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->Rip);
  // Set pc to handler
  exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
#else
  thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->Eip);
  // Set pc to handler
  exceptionInfo->ContextRecord->Eip = (LONG)handler;
#endif

  // Continue the execution
  return EXCEPTION_CONTINUE_EXECUTION;
}
2040 2040
2041 2041
// Used for PostMortemDump
// Debugger-callable helpers defined elsewhere in the VM; declared here so
// they are referenced (and thus linked) for post-mortem use.
extern "C" void safepoints();
extern "C" void find(int x);
extern "C" void events();
2046 2046
// According to Windows API documentation, an illegal instruction sequence should generate
// the 0xC000001C exception code. However, real world experience shows that occasionally
// the execution of an illegal instruction can generate the exception code 0xC000001E. This
// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).

#define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E

// From "Execution Protection in the Windows Operating System" draft 0.35
// Once a system header becomes available, the "real" define should be
// included or copied here.
#define EXCEPTION_INFO_EXEC_VIOLATION 0x08

// Expands to a name/value pair for building the exceptlabels table below.
#define def_excpt(val) #val, val

// One entry of the exception-code-to-name table used by os::exception_name.
struct siglabel {
  char *name;   // points at a string literal (never written through)
  int number;
};
2065 2065
// All Visual C++ exceptions thrown from code generated by the Microsoft Visual
// C++ compiler contain this error code. Because this is a compiler-generated
// error, the code is not listed in the Win32 API header files.
// The code is actually a cryptic mnemonic device, with the initial "E"
// standing for "exception" and the final 3 bytes (0x6D7363) representing the
// ASCII values of "msc".

#define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363


// Exception-code name table consumed by os::exception_name.
struct siglabel exceptlabels[] = {
    def_excpt(EXCEPTION_ACCESS_VIOLATION),
    def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
    def_excpt(EXCEPTION_BREAKPOINT),
    def_excpt(EXCEPTION_SINGLE_STEP),
    def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
    def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
    def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
    def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
    def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
    def_excpt(EXCEPTION_FLT_OVERFLOW),
    def_excpt(EXCEPTION_FLT_STACK_CHECK),
    def_excpt(EXCEPTION_FLT_UNDERFLOW),
    def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
    def_excpt(EXCEPTION_INT_OVERFLOW),
    def_excpt(EXCEPTION_PRIV_INSTRUCTION),
    def_excpt(EXCEPTION_IN_PAGE_ERROR),
    def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
    def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
    def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
    def_excpt(EXCEPTION_STACK_OVERFLOW),
    def_excpt(EXCEPTION_INVALID_DISPOSITION),
    def_excpt(EXCEPTION_GUARD_PAGE),
    def_excpt(EXCEPTION_INVALID_HANDLE),
    def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
    NULL, 0   // sentinel: terminates the scan in os::exception_name
};
2103 2103
2104 2104 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2105 2105 for (int i = 0; exceptlabels[i].name != NULL; i++) {
2106 2106 if (exceptlabels[i].number == exception_code) {
2107 2107 jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2108 2108 return buf;
2109 2109 }
2110 2110 }
2111 2111
2112 2112 return NULL;
2113 2113 }
2114 2114
2115 2115 //-----------------------------------------------------------------------------
2116 2116 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2117 2117 // handle exception caused by idiv; should only happen for -MinInt/-1
2118 2118 // (division by zero is handled explicitly)
2119 2119 #ifdef _M_IA64
2120 2120 assert(0, "Fix Handle_IDiv_Exception");
2121 2121 #elif _M_AMD64
2122 2122 PCONTEXT ctx = exceptionInfo->ContextRecord;
2123 2123 address pc = (address)ctx->Rip;
2124 2124 assert(pc[0] == 0xF7, "not an idiv opcode");
2125 2125 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2126 2126 assert(ctx->Rax == min_jint, "unexpected idiv exception");
2127 2127 // set correct result values and continue after idiv instruction
2128 2128 ctx->Rip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes
2129 2129 ctx->Rax = (DWORD)min_jint; // result
2130 2130 ctx->Rdx = (DWORD)0; // remainder
2131 2131 // Continue the execution
2132 2132 #else
2133 2133 PCONTEXT ctx = exceptionInfo->ContextRecord;
2134 2134 address pc = (address)ctx->Eip;
2135 2135 assert(pc[0] == 0xF7, "not an idiv opcode");
2136 2136 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2137 2137 assert(ctx->Eax == min_jint, "unexpected idiv exception");
2138 2138 // set correct result values and continue after idiv instruction
2139 2139 ctx->Eip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes
2140 2140 ctx->Eax = (DWORD)min_jint; // result
2141 2141 ctx->Edx = (DWORD)0; // remainder
2142 2142 // Continue the execution
2143 2143 #endif
2144 2144 return EXCEPTION_CONTINUE_EXECUTION;
2145 2145 }
2146 2146
#ifndef _WIN64
//-----------------------------------------------------------------------------
// Recover from an x87 floating-point exception caused by a native method
// changing the FPU control word: if the saved control word differs from the
// VM's standard one, restore it, mask/clear the pending FP exceptions, and
// resume. Otherwise defer to the previous unhandled-exception filter.
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by native method modifying control word
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;

  // All FLT_* codes share the recovery code below; other codes skip the
  // switch body entirely and fall through to the previous filter.
  switch (exception_code) {
  case EXCEPTION_FLT_DENORMAL_OPERAND:
  case EXCEPTION_FLT_DIVIDE_BY_ZERO:
  case EXCEPTION_FLT_INEXACT_RESULT:
  case EXCEPTION_FLT_INVALID_OPERATION:
  case EXCEPTION_FLT_OVERFLOW:
  case EXCEPTION_FLT_STACK_CHECK:
  case EXCEPTION_FLT_UNDERFLOW:
    jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
    if (fp_control_word != ctx->FloatSave.ControlWord) {
      // Restore FPCW and mask out FLT exceptions
      ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
      // Mask out pending FLT exceptions
      ctx->FloatSave.StatusWord &= 0xffffff00;
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  if (prev_uef_handler != NULL) {
    // We didn't handle this exception so pass it to the previous
    // UnhandledExceptionFilter.
    return (prev_uef_handler)(exceptionInfo);
  }

  return EXCEPTION_CONTINUE_SEARCH;
}
2180 2180 #else //_WIN64
2181 2181 /*
2182 2182 On Windows, the mxcsr control bits are non-volatile across calls
2183 2183 See also CR 6192333
2184 2184 If EXCEPTION_FLT_* happened after some native method modified
2185 2185 mxcsr - it is not a jvm fault.
However, should we decide to restore mxcsr after a faulty
native method, we can uncomment the following code:
2188 2188 jint MxCsr = INITIAL_MXCSR;
2189 2189 // we can't use StubRoutines::addr_mxcsr_std()
2190 2190 // because in Win64 mxcsr is not saved there
2191 2191 if (MxCsr != ctx->MxCsr) {
2192 2192 ctx->MxCsr = MxCsr;
2193 2193 return EXCEPTION_CONTINUE_EXECUTION;
2194 2194 }
2195 2195
2196 2196 */
2197 2197 #endif //_WIN64
2198 2198
2199 2199
2200 2200 // Fatal error reporting is single threaded so we can make this a
2201 2201 // static and preallocated. If it's more than MAX_PATH silently ignore
2202 2202 // it.
2203 2203 static char saved_error_file[MAX_PATH] = {0};
2204 2204
2205 2205 void os::set_error_file(const char *logfile) {
2206 2206 if (strlen(logfile) <= MAX_PATH) {
2207 2207 strncpy(saved_error_file, logfile, MAX_PATH);
2208 2208 }
2209 2209 }
2210 2210
// Build a VMError for the given fault and invoke the fatal-error reporter.
// Normally does not return; see note below for the UseOsErrorReporting case.
static inline void report_error(Thread* t, DWORD exception_code,
                                address addr, void* siginfo, void* context) {
  VMError err(t, exception_code, addr, siginfo, context);
  err.report_and_die();

  // If UseOsErrorReporting, this will return here and save the error file
  // somewhere where we can find it in the minidump.
}
2219 2219
2220 2220 //-----------------------------------------------------------------------------
2221 2221 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2222 2222 if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2223 2223 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2224 2224 #ifdef _M_IA64
2225 2225 address pc = (address) exceptionInfo->ContextRecord->StIIP;
2226 2226 #elif _M_AMD64
2227 2227 address pc = (address) exceptionInfo->ContextRecord->Rip;
2228 2228 #else
2229 2229 address pc = (address) exceptionInfo->ContextRecord->Eip;
2230 2230 #endif
2231 2231 Thread* t = ThreadLocalStorage::get_thread_slow(); // slow & steady
2232 2232
2233 2233 #ifndef _WIN64
2234 2234 // Execution protection violation - win32 running on AMD64 only
2235 2235 // Handled first to avoid misdiagnosis as a "normal" access violation;
2236 2236 // This is safe to do because we have a new/unique ExceptionInformation
2237 2237 // code for this condition.
2238 2238 if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2239 2239 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2240 2240 int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2241 2241 address addr = (address) exceptionRecord->ExceptionInformation[1];
2242 2242
2243 2243 if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2244 2244 int page_size = os::vm_page_size();
2245 2245
2246 2246 // Make sure the pc and the faulting address are sane.
2247 2247 //
2248 2248 // If an instruction spans a page boundary, and the page containing
2249 2249 // the beginning of the instruction is executable but the following
2250 2250 // page is not, the pc and the faulting address might be slightly
2251 2251 // different - we still want to unguard the 2nd page in this case.
2252 2252 //
2253 2253 // 15 bytes seems to be a (very) safe value for max instruction size.
2254 2254 bool pc_is_near_addr =
2255 2255 (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2256 2256 bool instr_spans_page_boundary =
2257 2257 (align_size_down((intptr_t) pc ^ (intptr_t) addr,
2258 2258 (intptr_t) page_size) > 0);
2259 2259
2260 2260 if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2261 2261 static volatile address last_addr =
2262 2262 (address) os::non_memory_address_word();
2263 2263
2264 2264 // In conservative mode, don't unguard unless the address is in the VM
2265 2265 if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2266 2266 (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2267 2267
2268 2268 // Set memory to RWX and retry
2269 2269 address page_start =
2270 2270 (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
2271 2271 bool res = os::protect_memory((char*) page_start, page_size,
2272 2272 os::MEM_PROT_RWX);
2273 2273
2274 2274 if (PrintMiscellaneous && Verbose) {
2275 2275 char buf[256];
2276 2276 jio_snprintf(buf, sizeof(buf), "Execution protection violation "
2277 2277 "at " INTPTR_FORMAT
2278 2278 ", unguarding " INTPTR_FORMAT ": %s", addr,
2279 2279 page_start, (res ? "success" : strerror(errno)));
2280 2280 tty->print_raw_cr(buf);
2281 2281 }
2282 2282
2283 2283 // Set last_addr so if we fault again at the same address, we don't
2284 2284 // end up in an endless loop.
2285 2285 //
2286 2286 // There are two potential complications here. Two threads trapping
2287 2287 // at the same address at the same time could cause one of the
2288 2288 // threads to think it already unguarded, and abort the VM. Likely
2289 2289 // very rare.
2290 2290 //
2291 2291 // The other race involves two threads alternately trapping at
2292 2292 // different addresses and failing to unguard the page, resulting in
2293 2293 // an endless loop. This condition is probably even more unlikely
2294 2294 // than the first.
2295 2295 //
2296 2296 // Although both cases could be avoided by using locks or thread
2297 2297 // local last_addr, these solutions are unnecessary complication:
2298 2298 // this handler is a best-effort safety net, not a complete solution.
2299 2299 // It is disabled by default and should only be used as a workaround
2300 2300 // in case we missed any no-execute-unsafe VM code.
2301 2301
2302 2302 last_addr = addr;
2303 2303
2304 2304 return EXCEPTION_CONTINUE_EXECUTION;
2305 2305 }
2306 2306 }
2307 2307
2308 2308 // Last unguard failed or not unguarding
2309 2309 tty->print_raw_cr("Execution protection violation");
2310 2310 report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2311 2311 exceptionInfo->ContextRecord);
2312 2312 return EXCEPTION_CONTINUE_SEARCH;
2313 2313 }
2314 2314 }
2315 2315 #endif // _WIN64
2316 2316
2317 2317 // Check to see if we caught the safepoint code in the
2318 2318 // process of write protecting the memory serialization page.
2319 2319 // It write enables the page immediately after protecting it
2320 2320 // so just return.
2321 2321 if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) {
2322 2322 JavaThread* thread = (JavaThread*) t;
2323 2323 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2324 2324 address addr = (address) exceptionRecord->ExceptionInformation[1];
2325 2325 if ( os::is_memory_serialize_page(thread, addr) ) {
2326 2326 // Block current thread until the memory serialize page permission restored.
2327 2327 os::block_on_serialize_page_trap();
2328 2328 return EXCEPTION_CONTINUE_EXECUTION;
2329 2329 }
2330 2330 }
2331 2331
2332 2332 if (t != NULL && t->is_Java_thread()) {
2333 2333 JavaThread* thread = (JavaThread*) t;
2334 2334 bool in_java = thread->thread_state() == _thread_in_Java;
2335 2335
2336 2336 // Handle potential stack overflows up front.
2337 2337 if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2338 2338 if (os::uses_stack_guard_pages()) {
2339 2339 #ifdef _M_IA64
2340 2340 //
2341 2341 // If it's a legal stack address continue, Windows will map it in.
2342 2342 //
2343 2343 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2344 2344 address addr = (address) exceptionRecord->ExceptionInformation[1];
2345 2345 if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() )
2346 2346 return EXCEPTION_CONTINUE_EXECUTION;
2347 2347
2348 2348 // The register save area is the same size as the memory stack
2349 2349 // and starts at the page just above the start of the memory stack.
2350 2350 // If we get a fault in this area, we've run out of register
2351 2351 // stack. If we are in java, try throwing a stack overflow exception.
2352 2352 if (addr > thread->stack_base() &&
2353 2353 addr <= (thread->stack_base()+thread->stack_size()) ) {
2354 2354 char buf[256];
2355 2355 jio_snprintf(buf, sizeof(buf),
2356 2356 "Register stack overflow, addr:%p, stack_base:%p\n",
2357 2357 addr, thread->stack_base() );
2358 2358 tty->print_raw_cr(buf);
2359 2359 // If not in java code, return and hope for the best.
2360 2360 return in_java ? Handle_Exception(exceptionInfo,
2361 2361 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2362 2362 : EXCEPTION_CONTINUE_EXECUTION;
2363 2363 }
2364 2364 #endif
2365 2365 if (thread->stack_yellow_zone_enabled()) {
2366 2366 // Yellow zone violation. The o/s has unprotected the first yellow
2367 2367 // zone page for us. Note: must call disable_stack_yellow_zone to
2368 2368 // update the enabled status, even if the zone contains only one page.
2369 2369 thread->disable_stack_yellow_zone();
2370 2370 // If not in java code, return and hope for the best.
2371 2371 return in_java ? Handle_Exception(exceptionInfo,
2372 2372 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2373 2373 : EXCEPTION_CONTINUE_EXECUTION;
2374 2374 } else {
2375 2375 // Fatal red zone violation.
2376 2376 thread->disable_stack_red_zone();
2377 2377 tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2378 2378 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2379 2379 exceptionInfo->ContextRecord);
2380 2380 return EXCEPTION_CONTINUE_SEARCH;
2381 2381 }
2382 2382 } else if (in_java) {
2383 2383 // JVM-managed guard pages cannot be used on win95/98. The o/s provides
2384 2384 // a one-time-only guard page, which it has released to us. The next
2385 2385 // stack overflow on this thread will result in an ACCESS_VIOLATION.
2386 2386 return Handle_Exception(exceptionInfo,
2387 2387 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2388 2388 } else {
2389 2389 // Can only return and hope for the best. Further stack growth will
2390 2390 // result in an ACCESS_VIOLATION.
2391 2391 return EXCEPTION_CONTINUE_EXECUTION;
2392 2392 }
2393 2393 } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2394 2394 // Either stack overflow or null pointer exception.
2395 2395 if (in_java) {
2396 2396 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2397 2397 address addr = (address) exceptionRecord->ExceptionInformation[1];
2398 2398 address stack_end = thread->stack_base() - thread->stack_size();
2399 2399 if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2400 2400 // Stack overflow.
2401 2401 assert(!os::uses_stack_guard_pages(),
2402 2402 "should be caught by red zone code above.");
2403 2403 return Handle_Exception(exceptionInfo,
2404 2404 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2405 2405 }
2406 2406 //
2407 2407 // Check for safepoint polling and implicit null
2408 2408 // We only expect null pointers in the stubs (vtable)
2409 2409 // the rest are checked explicitly now.
2410 2410 //
2411 2411 CodeBlob* cb = CodeCache::find_blob(pc);
2412 2412 if (cb != NULL) {
2413 2413 if (os::is_poll_address(addr)) {
2414 2414 address stub = SharedRuntime::get_poll_stub(pc);
2415 2415 return Handle_Exception(exceptionInfo, stub);
2416 2416 }
2417 2417 }
2418 2418 {
2419 2419 #ifdef _WIN64
2420 2420 //
2421 2421 // If it's a legal stack address map the entire region in
2422 2422 //
2423 2423 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2424 2424 address addr = (address) exceptionRecord->ExceptionInformation[1];
2425 2425 if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) {
2426 2426 addr = (address)((uintptr_t)addr &
2427 2427 (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2428 2428 os::commit_memory((char *)addr, thread->stack_base() - addr,
2429 2429 false );
2430 2430 return EXCEPTION_CONTINUE_EXECUTION;
2431 2431 }
2432 2432 else
2433 2433 #endif
2434 2434 {
2435 2435 // Null pointer exception.
2436 2436 #ifdef _M_IA64
2437 2437 // We catch register stack overflows in compiled code by doing
2438 2438 // an explicit compare and executing a st8(G0, G0) if the
2439 2439 // BSP enters into our guard area. We test for the overflow
2440 2440 // condition and fall into the normal null pointer exception
2441 2441 // code if BSP hasn't overflowed.
2442 2442 if ( in_java ) {
2443 2443 if(thread->register_stack_overflow()) {
2444 2444 assert((address)exceptionInfo->ContextRecord->IntS3 ==
2445 2445 thread->register_stack_limit(),
2446 2446 "GR7 doesn't contain register_stack_limit");
2447 2447 // Disable the yellow zone which sets the state that
2448 2448 // we've got a stack overflow problem.
2449 2449 if (thread->stack_yellow_zone_enabled()) {
2450 2450 thread->disable_stack_yellow_zone();
2451 2451 }
2452 2452 // Give us some room to process the exception
2453 2453 thread->disable_register_stack_guard();
2454 2454 // Update GR7 with the new limit so we can continue running
2455 2455 // compiled code.
2456 2456 exceptionInfo->ContextRecord->IntS3 =
2457 2457 (ULONGLONG)thread->register_stack_limit();
2458 2458 return Handle_Exception(exceptionInfo,
2459 2459 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2460 2460 } else {
2461 2461 //
2462 2462 // Check for implicit null
2463 2463 // We only expect null pointers in the stubs (vtable)
2464 2464 // the rest are checked explicitly now.
2465 2465 //
2466 2466 if (((uintptr_t)addr) < os::vm_page_size() ) {
2467 2467 // an access to the first page of VM--assume it is a null pointer
2468 2468 address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2469 2469 if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2470 2470 }
2471 2471 }
2472 2472 } // in_java
2473 2473
2474 2474 // IA64 doesn't use implicit null checking yet. So we shouldn't
2475 2475 // get here.
2476 2476 tty->print_raw_cr("Access violation, possible null pointer exception");
2477 2477 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2478 2478 exceptionInfo->ContextRecord);
2479 2479 return EXCEPTION_CONTINUE_SEARCH;
2480 2480 #else /* !IA64 */
2481 2481
2482 2482 // Windows 98 reports faulting addresses incorrectly
2483 2483 if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) ||
2484 2484 !os::win32::is_nt()) {
2485 2485 address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2486 2486 if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2487 2487 }
2488 2488 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2489 2489 exceptionInfo->ContextRecord);
2490 2490 return EXCEPTION_CONTINUE_SEARCH;
2491 2491 #endif
2492 2492 }
2493 2493 }
2494 2494 }
2495 2495
2496 2496 #ifdef _WIN64
2497 2497 // Special care for fast JNI field accessors.
2498 2498 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2499 2499 // in and the heap gets shrunk before the field access.
2500 2500 if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2501 2501 address addr = JNI_FastGetField::find_slowcase_pc(pc);
2502 2502 if (addr != (address)-1) {
2503 2503 return Handle_Exception(exceptionInfo, addr);
2504 2504 }
2505 2505 }
2506 2506 #endif
2507 2507
2508 2508 #ifdef _WIN64
2509 2509 // Windows will sometimes generate an access violation
2510 2510 // when we call malloc. Since we use VectoredExceptions
2511 2511 // on 64 bit platforms, we see this exception. We must
2512 2512 // pass this exception on so Windows can recover.
2513 2513 // We check to see if the pc of the fault is in NTDLL.DLL
2514 2514 // if so, we pass control on to Windows for handling.
2515 2515 if (UseVectoredExceptions && _addr_in_ntdll(pc)) return EXCEPTION_CONTINUE_SEARCH;
2516 2516 #endif
2517 2517
2518 2518 // Stack overflow or null pointer exception in native code.
2519 2519 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2520 2520 exceptionInfo->ContextRecord);
2521 2521 return EXCEPTION_CONTINUE_SEARCH;
2522 2522 }
2523 2523
2524 2524 if (in_java) {
2525 2525 switch (exception_code) {
2526 2526 case EXCEPTION_INT_DIVIDE_BY_ZERO:
2527 2527 return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2528 2528
2529 2529 case EXCEPTION_INT_OVERFLOW:
2530 2530 return Handle_IDiv_Exception(exceptionInfo);
2531 2531
2532 2532 } // switch
2533 2533 }
2534 2534 #ifndef _WIN64
2535 2535 if (((thread->thread_state() == _thread_in_Java) ||
2536 2536 (thread->thread_state() == _thread_in_native)) &&
2537 2537 exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION)
2538 2538 {
2539 2539 LONG result=Handle_FLT_Exception(exceptionInfo);
2540 2540 if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
2541 2541 }
2542 2542 #endif //_WIN64
2543 2543 }
2544 2544
2545 2545 if (exception_code != EXCEPTION_BREAKPOINT) {
2546 2546 #ifndef _WIN64
2547 2547 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2548 2548 exceptionInfo->ContextRecord);
2549 2549 #else
2550 2550 // Itanium Windows uses a VectoredExceptionHandler
2551 2551 // Which means that C++ programmatic exception handlers (try/except)
2552 2552 // will get here. Continue the search for the right except block if
2553 2553 // the exception code is not a fatal code.
2554 2554 switch ( exception_code ) {
2555 2555 case EXCEPTION_ACCESS_VIOLATION:
2556 2556 case EXCEPTION_STACK_OVERFLOW:
2557 2557 case EXCEPTION_ILLEGAL_INSTRUCTION:
2558 2558 case EXCEPTION_ILLEGAL_INSTRUCTION_2:
2559 2559 case EXCEPTION_INT_OVERFLOW:
2560 2560 case EXCEPTION_INT_DIVIDE_BY_ZERO:
2561 2561 case EXCEPTION_UNCAUGHT_CXX_EXCEPTION:
2562 2562 { report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2563 2563 exceptionInfo->ContextRecord);
2564 2564 }
2565 2565 break;
2566 2566 default:
2567 2567 break;
2568 2568 }
2569 2569 #endif
2570 2570 }
2571 2571 return EXCEPTION_CONTINUE_SEARCH;
2572 2572 }
2573 2573
2574 2574 #ifndef _WIN64
2575 2575 // Special care for fast JNI accessors.
2576 2576 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2577 2577 // the heap gets shrunk before the field access.
2578 2578 // Need to install our own structured exception handler since native code may
2579 2579 // install its own.
// SEH filter for the fast JNI accessor wrappers generated below (32-bit only).
2580 2580 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2581 2581 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2582 2582 if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2583 2583 address pc = (address) exceptionInfo->ContextRecord->Eip; // Eip is x86-32; this block is inside #ifndef _WIN64
2584 2584 address addr = JNI_FastGetField::find_slowcase_pc(pc); // (address)-1 means pc is not in a fast accessor stub
2585 2585 if (addr != (address)-1) {
2586 2586 return Handle_Exception(exceptionInfo, addr); // resume execution at the slow-case continuation
2587 2587 }
2588 2588 }
2589 2589 return EXCEPTION_CONTINUE_SEARCH; // not ours -- let outer handlers inspect the exception
2590 2590 }
2591 2591
// Generates a wrapper around each fast JNI primitive getter that installs
// our own structured exception handler (__try/__except), routing a
// GC-induced access violation to fastJNIAccessorExceptionFilter above.
// On filter miss the wrapper returns 0 of the primitive type.
2592 2592 #define DEFINE_FAST_GETFIELD(Return,Fieldname,Result) \
2593 2593 Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, jobject obj, jfieldID fieldID) { \
2594 2594 __try { \
2595 2595 return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, obj, fieldID); \
2596 2596 } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) { \
2597 2597 } \
2598 2598 return 0; \
2599 2599 }
2600 2600
// One wrapper per JNI primitive getter.
2601 2601 DEFINE_FAST_GETFIELD(jboolean, bool, Boolean)
2602 2602 DEFINE_FAST_GETFIELD(jbyte, byte, Byte)
2603 2603 DEFINE_FAST_GETFIELD(jchar, char, Char)
2604 2604 DEFINE_FAST_GETFIELD(jshort, short, Short)
2605 2605 DEFINE_FAST_GETFIELD(jint, int, Int)
2606 2606 DEFINE_FAST_GETFIELD(jlong, long, Long)
2607 2607 DEFINE_FAST_GETFIELD(jfloat, float, Float)
2608 2608 DEFINE_FAST_GETFIELD(jdouble, double, Double)
2609 2609
// Maps a primitive BasicType to the SEH-protected getter wrapper above.
2610 2610 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2611 2611 switch (type) {
2612 2612 case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2613 2613 case T_BYTE: return (address)jni_fast_GetByteField_wrapper;
2614 2614 case T_CHAR: return (address)jni_fast_GetCharField_wrapper;
2615 2615 case T_SHORT: return (address)jni_fast_GetShortField_wrapper;
2616 2616 case T_INT: return (address)jni_fast_GetIntField_wrapper;
2617 2617 case T_LONG: return (address)jni_fast_GetLongField_wrapper;
2618 2618 case T_FLOAT: return (address)jni_fast_GetFloatField_wrapper;
2619 2619 case T_DOUBLE: return (address)jni_fast_GetDoubleField_wrapper;
2620 2620 default: ShouldNotReachHere(); // only primitive types are expected here
2621 2621 }
2622 2622 return (address)-1; // unreachable in product; keeps the compiler happy
2623 2623 }
2624 2624 #endif
2625 2625
2626 2626 // Virtual Memory
2627 2627
2628 2628 int os::vm_page_size() { return os::win32::vm_page_size(); } // system page size (typically 4K)
2629 2629 int os::vm_allocation_granularity() {
2630 2630 return os::win32::vm_allocation_granularity(); // VirtualAlloc reservation granularity (typically 64K)
2631 2631 }
2632 2632
2633 2633 // Windows large page support is available on Windows 2003. In order to use
2634 2634 // large page memory, the administrator must first assign additional privilege
2635 2635 // to the user:
2636 2636 // + select Control Panel -> Administrative Tools -> Local Security Policy
2637 2637 // + select Local Policies -> User Rights Assignment
2638 2638 // + double click "Lock pages in memory", add users and/or groups
2639 2639 // + reboot
2640 2640 // Note the above steps are needed for administrator as well, as administrators
2641 2641 // by default do not have the privilege to lock pages in memory.
2642 2642 //
2643 2643 // Note about Windows 2003: although the API supports committing large page
2644 2644 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2645 2645 // scenario, I found through experiment it only uses large page if the entire
2646 2646 // memory region is reserved and committed in a single VirtualAlloc() call.
2647 2647 // This makes Windows large page support more or less like Solaris ISM, in
2648 2648 // that the entire heap must be committed upfront. This probably will change
2649 2649 // in the future, if so the code below needs to be revisited.
2650 2650
2651 2651 #ifndef MEM_LARGE_PAGES
2652 2652 #define MEM_LARGE_PAGES 0x20000000
2653 2653 #endif
2654 2654
2655 2655 static HANDLE _hProcess;
2656 2656 static HANDLE _hToken;
2657 2657
2658 2658 // Container for NUMA node list info
// Container for NUMA node list info
2659 2659 class NUMANodeListHolder {
2660 2660 private:
2661 2661 int *_numa_used_node_list; // allocated below
2662 2662 int _numa_used_node_count; // number of valid entries in _numa_used_node_list
2663 2663
2664 2664 void free_node_list() {
2665 2665 if (_numa_used_node_list != NULL) {
2666 2666 FREE_C_HEAP_ARRAY(int, _numa_used_node_list, mtInternal);
2667 2667 }
// NOTE(review): the pointer is not reset to NULL after freeing; today this is
// safe because build() reallocates immediately after calling free_node_list()
// and the destructor runs only once -- confirm before adding new callers.
2668 2668 }
2669 2669
2670 2670 public:
2671 2671 NUMANodeListHolder() {
2672 2672 _numa_used_node_count = 0;
2673 2673 _numa_used_node_list = NULL;
2674 2674 // do rest of initialization in build routine (after function pointers are set up)
2675 2675 }
2676 2676
2677 2677 ~NUMANodeListHolder() {
2678 2678 free_node_list();
2679 2679 }
2680 2680
// Populates the node list with the NUMA nodes covered by this process's
// affinity mask. Returns true only when more than one node is usable
// (interleaving is pointless on a single node).
2681 2681 bool build() {
2682 2682 DWORD_PTR proc_aff_mask;
2683 2683 DWORD_PTR sys_aff_mask;
2684 2684 if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2685 2685 ULONG highest_node_number;
2686 2686 if (!os::Kernel32Dll::GetNumaHighestNodeNumber(&highest_node_number)) return false;
2687 2687 free_node_list();
2688 2688 _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2689 2689 for (unsigned int i = 0; i <= highest_node_number; i++) {
2690 2690 ULONGLONG proc_mask_numa_node;
2691 2691 if (!os::Kernel32Dll::GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2692 2692 if ((proc_aff_mask & proc_mask_numa_node)!=0) { // node has at least one CPU we may run on
2693 2693 _numa_used_node_list[_numa_used_node_count++] = i;
2694 2694 }
2695 2695 }
2696 2696 return (_numa_used_node_count > 1);
2697 2697 }
2698 2698
2699 2699 int get_count() {return _numa_used_node_count;}
2700 2700 int get_node_list_entry(int n) {
2701 2701 // for indexes out of range, returns -1
// NOTE(review): only the upper bound is checked; a negative n would index
// out of bounds -- all current callers pass non-negative values.
2702 2702 return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2703 2703 }
2704 2704
2705 2705 } numa_node_list_holder; // single file-static instance
2706 2706
2707 2707
2708 2708
2709 2709 static size_t _large_page_size = 0;
2710 2710
// True when both the kernel32 large-page entry point and the advapi32
// privilege APIs needed by large-page setup were resolved at runtime.
2711 2711 static bool resolve_functions_for_large_page_init() {
2712 2712 return os::Kernel32Dll::GetLargePageMinimumAvailable() &&
2713 2713 os::Advapi32Dll::AdvapiAvailable();
2714 2714 }
2715 2715
// Attempts to enable SeLockMemoryPrivilege for the current process, which is
// required to allocate MEM_LARGE_PAGES memory. Opens _hProcess/_hToken as a
// side effect; the caller must invoke cleanup_after_large_page_init() to
// close them regardless of the return value.
2716 2716 static bool request_lock_memory_privilege() {
2717 2717 _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2718 2718 os::current_process_id());
2719 2719
2720 2720 LUID luid;
2721 2721 if (_hProcess != NULL &&
2722 2722 os::Advapi32Dll::OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
2723 2723 os::Advapi32Dll::LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2724 2724
2725 2725 TOKEN_PRIVILEGES tp;
2726 2726 tp.PrivilegeCount = 1;
2727 2727 tp.Privileges[0].Luid = luid;
2728 2728 tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2729 2729
2730 2730 // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2731 2731 // privilege. Check GetLastError() too. See MSDN document.
2732 2732 if (os::Advapi32Dll::AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2733 2733 (GetLastError() == ERROR_SUCCESS)) {
2734 2734 return true;
2735 2735 }
2736 2736 }
2737 2737
2738 2738 return false;
2739 2739 }
2740 2740
// Closes the process/token handles opened by request_lock_memory_privilege().
// Idempotent: handles are NULLed so a second call is a no-op.
2741 2741 static void cleanup_after_large_page_init() {
2742 2742 if (_hProcess) CloseHandle(_hProcess);
2743 2743 _hProcess = NULL;
2744 2744 if (_hToken) CloseHandle(_hToken);
2745 2745 _hToken = NULL;
2746 2746 }
2747 2747
// One-time setup for NUMA interleaving: aligns NUMAInterleaveGranularity,
// checks OS support, and builds the usable-node list. Returns true when
// interleaving can be used; warns (only if the flag was set explicitly)
// and returns false otherwise.
2748 2748 static bool numa_interleaving_init() {
2749 2749 bool success = false;
2750 2750 bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2751 2751
2752 2752 // print a warning if UseNUMAInterleaving flag is specified on command line
2753 2753 bool warn_on_failure = use_numa_interleaving_specified;
2754 2754 # define WARN(msg) if (warn_on_failure) { warning(msg); }
2755 2755
2756 2756 // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2757 2757 size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2758 2758 NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);
2759 2759
2760 2760 if (os::Kernel32Dll::NumaCallsAvailable()) {
2761 2761 if (numa_node_list_holder.build()) { // true only when >1 NUMA node is usable
2762 2762 if (PrintMiscellaneous && Verbose) {
2763 2763 tty->print("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2764 2764 for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2765 2765 tty->print("%d ", numa_node_list_holder.get_node_list_entry(i));
2766 2766 }
2767 2767 tty->print("\n");
2768 2768 }
2769 2769 success = true;
2770 2770 } else {
2771 2771 WARN("Process does not cover multiple NUMA nodes.");
2772 2772 }
2773 2773 } else {
2774 2774 WARN("NUMA Interleaving is not supported by the operating system.");
2775 2775 }
2776 2776 if (!success) {
2777 2777 if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
2778 2778 }
2779 2779 return success;
2780 2780 #undef WARN
2781 2781 }
2782 2782
2783 2783 // this routine is used whenever we need to reserve a contiguous VA range
2784 2784 // but we need to make separate VirtualAlloc calls for each piece of the range
2785 2785 // Reasons for doing this:
2786 2786 // * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
2787 2787 // * UseNUMAInterleaving requires a separate node for each piece
// this routine is used whenever we need to reserve a contiguous VA range
// but we need to make separate VirtualAlloc calls for each piece of the range
// Reasons for doing this:
// * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
// * UseNUMAInterleaving requires a separate node for each piece
// Returns the start of the range on success, NULL on failure (with any
// partially committed chunks released). should_inject_error is a debug-only
// hook that forces a mid-allocation failure for testing the cleanup path.
2788 2788 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, DWORD prot,
2789 2789 bool should_inject_error=false) {
2790 2790 char * p_buf;
2791 2791 // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2792 2792 size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2793 2793 size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2794 2794
2795 2795 // first reserve enough address space in advance since we want to be
2796 2796 // able to break a single contiguous virtual address range into multiple
2797 2797 // large page commits but WS2003 does not allow reserving large page space
2798 2798 // so we just use 4K pages for reserve, this gives us a legal contiguous
2799 2799 // address space. then we will deallocate that reservation, and re alloc
2800 2800 // using large pages
2801 2801 const size_t size_of_reserve = bytes + chunk_size; // extra chunk leaves room for the align_size_up below
2802 2802 if (bytes > size_of_reserve) { // addition wrapped around: bytes is too large
2803 2803 // Overflowed.
2804 2804 return NULL;
2805 2805 }
2806 2806 p_buf = (char *) VirtualAlloc(addr,
2807 2807 size_of_reserve, // size of Reserve
2808 2808 MEM_RESERVE,
2809 2809 PAGE_READWRITE);
2810 2810 // If reservation failed, return NULL
2811 2811 if (p_buf == NULL) return NULL;
2812 2812 MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
2813 2813 os::release_memory(p_buf, bytes + chunk_size); // probe reservation done; release before re-allocating piecewise
2814 2814
2815 2815 // we still need to round up to a page boundary (in case we are using large pages)
2816 2816 // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
2817 2817 // instead we handle this in the bytes_to_rq computation below
2818 2818 p_buf = (char *) align_size_up((size_t)p_buf, page_size);
2819 2819
2820 2820 // now go through and allocate one chunk at a time until all bytes are
2821 2821 // allocated
2822 2822 size_t bytes_remaining = bytes;
2823 2823 // An overflow of align_size_up() would have been caught above
2824 2824 // in the calculation of size_of_reserve.
2825 2825 char * next_alloc_addr = p_buf;
2826 2826 HANDLE hProc = GetCurrentProcess();
2827 2827
2828 2828 #ifdef ASSERT
2829 2829 // Variable for the failure injection
2830 2830 long ran_num = os::random();
2831 2831 size_t fail_after = ran_num % bytes;
2832 2832 #endif
2833 2833
2834 2834 int count=0; // chunk index; used to round-robin over NUMA nodes
2835 2835 while (bytes_remaining) {
2836 2836 // select bytes_to_rq to get to the next chunk_size boundary
2837 2837
2838 2838 size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
2839 2839 // Note allocate and commit
2840 2840 char * p_new;
2841 2841
2842 2842 #ifdef ASSERT
2843 2843 bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
2844 2844 #else
2845 2845 const bool inject_error_now = false;
2846 2846 #endif
2847 2847
2848 2848 if (inject_error_now) {
2849 2849 p_new = NULL;
2850 2850 } else {
2851 2851 if (!UseNUMAInterleaving) {
2852 2852 p_new = (char *) VirtualAlloc(next_alloc_addr,
2853 2853 bytes_to_rq,
2854 2854 flags,
2855 2855 prot);
2856 2856 } else {
2857 2857 // get the next node to use from the used_node_list
2858 2858 assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
2859 2859 DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
2860 2860 p_new = (char *)os::Kernel32Dll::VirtualAllocExNuma(hProc,
2861 2861 next_alloc_addr,
2862 2862 bytes_to_rq,
2863 2863 flags,
2864 2864 prot,
2865 2865 node);
2866 2866 }
2867 2867 }
2868 2868
2869 2869 if (p_new == NULL) {
2870 2870 // Free any allocated pages
2871 2871 if (next_alloc_addr > p_buf) {
2872 2872 // Some memory was committed so release it.
2873 2873 size_t bytes_to_release = bytes - bytes_remaining;
2874 2874 // NMT has yet to record any individual blocks, so it
2875 2875 // need to create a dummy 'reserve' record to match
2876 2876 // the release.
2877 2877 MemTracker::record_virtual_memory_reserve((address)p_buf,
2878 2878 bytes_to_release, CALLER_PC);
2879 2879 os::release_memory(p_buf, bytes_to_release);
2880 2880 }
2881 2881 #ifdef ASSERT
2882 2882 if (should_inject_error) {
2883 2883 if (TracePageSizes && Verbose) {
2884 2884 tty->print_cr("Reserving pages individually failed.");
2885 2885 }
2886 2886 }
2887 2887 #endif
2888 2888 return NULL;
2889 2889 }
2890 2890
2891 2891 bytes_remaining -= bytes_to_rq;
2892 2892 next_alloc_addr += bytes_to_rq;
2893 2893 count++;
2894 2894 }
2895 2895 // Although the memory is allocated individually, it is returned as one.
2896 2896 // NMT records it as one block.
2897 2897 address pc = CALLER_PC;
2898 2898 MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, pc);
2899 2899 if ((flags & MEM_COMMIT) != 0) {
2900 2900 MemTracker::record_virtual_memory_commit((address)p_buf, bytes, pc);
2901 2901 }
2902 2902
2903 2903 // made it this far, success
2904 2904 return p_buf;
2905 2905 }
2906 2906
2907 2907
2908 2908
// One-time large-page setup: resolves the required OS entry points, enables
// SeLockMemoryPrivilege, and determines _large_page_size. On any failure,
// warns (only if a large-page flag was set explicitly) and clears
// UseLargePages.
2909 2909 void os::large_page_init() {
2910 2910 if (!UseLargePages) return;
2911 2911
2912 2912 // print a warning if any large page related flag is specified on command line
2913 2913 bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
2914 2914 !FLAG_IS_DEFAULT(LargePageSizeInBytes);
2915 2915 bool success = false;
2916 2916
2917 2917 # define WARN(msg) if (warn_on_failure) { warning(msg); }
2918 2918 if (resolve_functions_for_large_page_init()) {
2919 2919 if (request_lock_memory_privilege()) {
2920 2920 size_t s = os::Kernel32Dll::GetLargePageMinimum(); // 0 when the CPU has no large-page support
2921 2921 if (s) {
2922 2922 #if defined(IA32) || defined(AMD64)
2923 2923 if (s > 4*M || LargePageSizeInBytes > 4*M) {
2924 2924 WARN("JVM cannot use large pages bigger than 4mb.");
2925 2925 } else {
2926 2926 #endif
2927 2927 if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) { // user size must be a multiple of the HW minimum
2928 2928 _large_page_size = LargePageSizeInBytes;
2929 2929 } else {
2930 2930 _large_page_size = s;
2931 2931 }
2932 2932 success = true;
2933 2933 #if defined(IA32) || defined(AMD64)
2934 2934 }
2935 2935 #endif
2936 2936 } else {
2937 2937 WARN("Large page is not supported by the processor.");
2938 2938 }
2939 2939 } else {
2940 2940 WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
2941 2941 }
2942 2942 } else {
2943 2943 WARN("Large page is not supported by the operating system.");
2944 2944 }
2945 2945 #undef WARN
2946 2946
2947 2947 const size_t default_page_size = (size_t) vm_page_size();
2948 2948 if (success && _large_page_size > default_page_size) {
2949 2949 _page_sizes[0] = _large_page_size;
2950 2950 _page_sizes[1] = default_page_size;
2951 2951 _page_sizes[2] = 0; // zero-terminated list
2952 2952 }
2953 2953
2954 2954 cleanup_after_large_page_init(); // always close the handles opened above
2955 2955 UseLargePages = success;
2956 2956 }
2957 2957
2958 2958 // On win32, one cannot release just a part of reserved memory, it's an
2959 2959 // all or nothing deal. When we split a reservation, we must break the
2960 2960 // reservation into two reservations.
2961 2961 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
2962 2962 bool realloc) {
2963 2963 if (size > 0) {
↓ open down ↓ |
2963 lines elided |
↑ open up ↑ |
2964 2964 release_memory(base, size);
2965 2965 if (realloc) {
2966 2966 reserve_memory(split, base);
2967 2967 }
2968 2968 if (size != split) {
2969 2969 reserve_memory(size - split, base + split);
2970 2970 }
2971 2971 }
2972 2972 }
2973 2973
// New in this change (8014611): Windows VirtualFree cannot release a
// sub-range of a reservation, so partial-region release is unsupported.
2974 +bool os::can_release_partial_region() {
2975 + return false;
2976 +}
2977 +
2974 2978 // Multiple threads can race in this code but it's not possible to unmap small sections of
2975 2979 // virtual space to get requested alignment, like posix-like os's.
2976 2980 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
// Over-reserves by 'alignment', releases, then re-reserves at the computed
// aligned address; retries until the re-reservation wins the race.
2977 2981 char* os::reserve_memory_aligned(size_t size, size_t alignment) {
2978 2982 assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
2979 2983 "Alignment must be a multiple of allocation granularity (page size)");
2980 2984 assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
2981 2985
2982 2986 size_t extra_size = size + alignment;
2983 2987 assert(extra_size >= size, "overflow, size is too large to allow alignment");
2984 2988
2985 2989 char* aligned_base = NULL;
2986 2990
2987 2991 do {
2988 2992 char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
2989 2993 if (extra_base == NULL) {
2990 2994 return NULL; // out of address space; give up rather than loop
2991 2995 }
2992 2996 // Do manual alignment
2993 2997 aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
2994 2998
2995 2999 os::release_memory(extra_base, extra_size); // must release whole block; Windows has no partial release
2996 3000
2997 3001 aligned_base = os::reserve_memory(size, aligned_base); // may fail if another thread grabbed the range
2998 3002
2999 3003 } while (aligned_base == NULL);
3000 3004
3001 3005 return aligned_base;
3002 3006 }
3003 3007
// Reserves (without committing) 'bytes' of virtual address space, optionally
// at a requested address. Returns NULL on failure.
3004 3008 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3005 3009 assert((size_t)addr % os::vm_allocation_granularity() == 0,
3006 3010 "reserve alignment");
3007 3011 assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size");
3008 3012 char* res;
3009 3013 // note that if UseLargePages is on, all the areas that require interleaving
3010 3014 // will go thru reserve_memory_special rather than thru here.
3011 3015 bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3012 3016 if (!use_individual) {
3013 3017 res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3014 3018 } else {
3015 3019 elapsedTimer reserveTimer;
3016 3020 if( Verbose && PrintMiscellaneous ) reserveTimer.start();
3017 3021 // in numa interleaving, we have to allocate pages individually
3018 3022 // (well really chunks of NUMAInterleaveGranularity size)
3019 3023 res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3020 3024 if (res == NULL) {
3021 3025 warning("NUMA page allocation failed");
3022 3026 }
3023 3027 if( Verbose && PrintMiscellaneous ) {
3024 3028 reserveTimer.stop();
3025 3029 tty->print_cr("reserve_memory of %Ix bytes took %ld ms (%ld ticks)", bytes,
3026 3030 reserveTimer.milliseconds(), reserveTimer.ticks());
3027 3031 }
3028 3032 }
3029 3033 assert(res == NULL || addr == NULL || addr == res,
3030 3034 "Unexpected address from reserve.");
3031 3035
3032 3036 return res;
3033 3037 }
3034 3038
3035 3039 // Reserve memory at an arbitrary address, only if that area is
3036 3040 // available (and not reserved for something else).
3037 3041 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3038 3042 // Windows os::reserve_memory() fails if the requested address range is
3039 3043 // not available, so no extra probing is needed here.
3040 3044 return reserve_memory(bytes, requested_addr);
3041 3045 }
3042 3046
3043 3047 size_t os::large_page_size() {
3044 3048 return _large_page_size; // set (or left 0) by os::large_page_init()
3045 3049 }
3046 3050
3047 3051 bool os::can_commit_large_page_memory() {
3048 3052 // Windows only uses large page memory when the entire region is reserved
3049 3053 // and committed in a single VirtualAlloc() call. This may change in the
3050 3054 // future, but with Windows 2003 it's not possible to commit on demand.
3051 3055 return false;
3052 3056 }
3053 3057
3054 3058 bool os::can_execute_large_page_memory() {
3055 3059 return true;
3056 3060 }
3057 3061
// Reserves AND commits 'bytes' of large-page memory in one step (see the
// can_commit_large_page_memory() comment above). Returns NULL on failure.
3058 3062 char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
3059 3063
3060 3064 const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3061 3065 const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3062 3066
3063 3067 // with large pages, there are two cases where we need to use Individual Allocation
3064 3068 // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3065 3069 // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3066 3070 if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3067 3071 if (TracePageSizes && Verbose) {
3068 3072 tty->print_cr("Reserving large pages individually.");
3069 3073 }
3070 3074 char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3071 3075 if (p_buf == NULL) {
3072 3076 // give an appropriate warning message
3073 3077 if (UseNUMAInterleaving) {
3074 3078 warning("NUMA large page allocation failed, UseLargePages flag ignored");
3075 3079 }
3076 3080 if (UseLargePagesIndividualAllocation) {
3077 3081 warning("Individually allocated large pages failed, "
3078 3082 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3079 3083 }
3080 3084 return NULL;
3081 3085 }
3082 3086
3083 3087 return p_buf; // NMT recording already done inside allocate_pages_individually
3084 3088
3085 3089 } else {
3086 3090 // normal policy just allocate it all at once
3087 3091 DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3088 3092 char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot); // note: 'addr' hint is not used on this path
3089 3093 if (res != NULL) {
3090 3094 address pc = CALLER_PC;
3091 3095 MemTracker::record_virtual_memory_reserve((address)res, bytes, pc);
3092 3096 MemTracker::record_virtual_memory_commit((address)res, bytes, pc);
3093 3097 }
3094 3098
3095 3099 return res;
3096 3100 }
3097 3101 }
3098 3102
// Releases memory obtained from reserve_memory_special(); records the
// matching NMT uncommit first since that memory was committed at reserve time.
3099 3103 bool os::release_memory_special(char* base, size_t bytes) {
3100 3104 assert(base != NULL, "Sanity check");
3101 3105 // Memory allocated via reserve_memory_special() is committed
3102 3106 MemTracker::record_virtual_memory_uncommit((address)base, bytes);
3103 3107 return release_memory(base, bytes);
3104 3108 }
3105 3109
3106 3110 void os::print_statistics() {
3107 3111 }
3108 3112
// Commits 'bytes' of previously reserved address space. Under NUMA
// interleaving the range may span multiple reservations, so it is committed
// reservation-by-reservation via VirtualQuery. Returns false on any failure.
3109 3113 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3110 3114 if (bytes == 0) {
3111 3115 // Don't bother the OS with noops.
3112 3116 return true;
3113 3117 }
3114 3118 assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3115 3119 assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3116 3120 // Don't attempt to print anything if the OS call fails. We're
3117 3121 // probably low on resources, so the print itself may cause crashes.
3118 3122
3119 3123 // unless we have NUMAInterleaving enabled, the range of a commit
3120 3124 // is always within a reserve covered by a single VirtualAlloc
3121 3125 // in that case we can just do a single commit for the requested size
3122 3126 if (!UseNUMAInterleaving) {
3123 3127 if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) return false;
3124 3128 if (exec) {
3125 3129 DWORD oldprot;
3126 3130 // Windows doc says to use VirtualProtect to get execute permissions
3127 3131 if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) return false;
3128 3132 }
3129 3133 return true;
3130 3134 } else {
3131 3135
3132 3136 // when NUMAInterleaving is enabled, the commit might cover a range that
3133 3137 // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3134 3138 // VirtualQuery can help us determine that. The RegionSize that VirtualQuery
3135 3139 // returns represents the number of bytes that can be committed in one step.
3136 3140 size_t bytes_remaining = bytes;
3137 3141 char * next_alloc_addr = addr;
3138 3142 while (bytes_remaining > 0) {
3139 3143 MEMORY_BASIC_INFORMATION alloc_info;
3140 3144 VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3141 3145 size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3142 3146 if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT, PAGE_READWRITE) == NULL)
3143 3147 return false; // earlier sub-commits in this loop are left committed
3144 3148 if (exec) {
3145 3149 DWORD oldprot;
3146 3150 if (!VirtualProtect(next_alloc_addr, bytes_to_rq, PAGE_EXECUTE_READWRITE, &oldprot))
3147 3151 return false;
3148 3152 }
3149 3153 bytes_remaining -= bytes_to_rq;
3150 3154 next_alloc_addr += bytes_to_rq;
3151 3155 }
3152 3156 }
3153 3157 // if we made it this far, return true
3154 3158 return true;
3155 3159 }
3156 3160
// Alignment-hint overload: the hint is ignored on Windows.
3157 3161 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3158 3162 bool exec) {
3159 3163 return commit_memory(addr, size, exec);
3160 3164 }
3161 3165
// Decommits pages but keeps the address range reserved.
3162 3166 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3163 3167 if (bytes == 0) {
3164 3168 // Don't bother the OS with noops.
3165 3169 return true;
3166 3170 }
3167 3171 assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3168 3172 assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3169 3173 return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3170 3174 }
3171 3175
// Releases an entire reservation; MEM_RELEASE requires size 0 and the exact
// base address, so 'bytes' is necessarily unused (no partial release on Windows).
3172 3176 bool os::pd_release_memory(char* addr, size_t bytes) {
3173 3177 return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3174 3178 }
3175 3179
3176 3180 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3177 3181 return os::commit_memory(addr, size); // guard protection is applied separately via protect_memory
3178 3182 }
3179 3183
3180 3184 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3181 3185 return os::uncommit_memory(addr, size);
3182 3186 }
3183 3187
3184 3188 // Set protections specified
// Maps the platform-independent ProtType to a Win32 page-protection constant
// and applies it with VirtualProtect. Commits the pages first if needed,
// since Win32 can only change protection on committed memory.
3185 3189 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3186 3190 bool is_committed) {
3187 3191 unsigned int p = 0;
3188 3192 switch (prot) {
3189 3193 case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3190 3194 case MEM_PROT_READ: p = PAGE_READONLY; break;
3191 3195 case MEM_PROT_RW: p = PAGE_READWRITE; break;
3192 3196 case MEM_PROT_RWX: p = PAGE_EXECUTE_READWRITE; break;
3193 3197 default:
3194 3198 ShouldNotReachHere();
3195 3199 }
3196 3200
3197 3201 DWORD old_status;
3198 3202
3199 3203 // Strange enough, but on Win32 one can change protection only for committed
3200 3204 // memory, not a big deal anyway, as bytes less or equal than 64K
3201 3205 if (!is_committed && !commit_memory(addr, bytes, prot == MEM_PROT_RWX)) {
3202 3206 fatal("cannot commit protection page");
3203 3207 }
3204 3208 // One cannot use os::guard_memory() here, as on Win32 guard page
3205 3209 // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
3206 3210 //
3207 3211 // Pages in the region become guard pages. Any attempt to access a guard page
3208 3212 // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3209 3213 // the guard page status. Guard pages thus act as a one-time access alarm.
3210 3214 return VirtualProtect(addr, bytes, p, &old_status) != 0;
3211 3215 }
3212 3216
// Marks pages as one-shot guard pages (see the PAGE_GUARD note above).
3213 3217 bool os::guard_memory(char* addr, size_t bytes) {
3214 3218 DWORD old_status;
3215 3219 return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3216 3220 }
3217 3221
// Restores plain read/write protection, clearing the guard status.
3218 3222 bool os::unguard_memory(char* addr, size_t bytes) {
3219 3223 DWORD old_status;
3220 3224 return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3221 3225 }
3222 3226
// NUMA-related hooks: realign/free/make_global/make_local are no-ops on
// Windows; topology is treated as static.
3223 3227 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3224 3228 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3225 3229 void os::numa_make_global(char *addr, size_t bytes) { }
3226 3230 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
3227 3231 bool os::numa_topology_changed() { return false; }
3228 3232 size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); } // at least 1 (UMA)
3229 3233 int os::numa_get_group_id() { return 0; }
// Fills 'ids' with up to 'size' NUMA node ids; returns the count written.
3230 3234 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3231 3235 if (numa_node_list_holder.get_count() == 0 && size > 0) {
3232 3236 // Provide an answer for UMA systems
3233 3237 ids[0] = 0;
3234 3238 return 1;
3235 3239 } else {
3236 3240 // check for size bigger than actual groups_num
3237 3241 size = MIN2(size, numa_get_groups_num());
3238 3242 for (int i = 0; i < (int)size; i++) {
3239 3243 ids[i] = numa_node_list_holder.get_node_list_entry(i);
3240 3244 }
3241 3245 return size;
3242 3246 }
3243 3247 }
3244 3248
// Page placement inspection is not supported on Windows; always fails.
bool os::get_page_info(char *start, page_info* info) {
  return false;
}

// No page scanning support on Windows; claim the whole range matched by
// returning 'end'.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}
3252 3256
// Returns an address guaranteed never to be handed out by reserve_memory.
char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*)-1;
}
3259 3263
#define MAX_ERROR_COUNT 100
// ResumeThread's failure return value, i.e. (DWORD)-1.
#define SYS_THREAD_ERROR 0xffffffffUL

// Resume (start) a thread that was created in the suspended state.
void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // Returns previous suspend state:
  // 0:  Thread was not suspended
  // 1:  Thread is running now
  // >1: Thread is still suspended.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}
3271 3275
3272 3276 class HighResolutionInterval {
3273 3277 // The default timer resolution seems to be 10 milliseconds.
3274 3278 // (Where is this written down?)
3275 3279 // If someone wants to sleep for only a fraction of the default,
3276 3280 // then we set the timer resolution down to 1 millisecond for
3277 3281 // the duration of their interval.
3278 3282 // We carefully set the resolution back, since otherwise we
3279 3283 // seem to incur an overhead (3%?) that we don't need.
3280 3284 // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
3281 3285 // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
3282 3286 // Alternatively, we could compute the relative error (503/500 = .6%) and only use
3283 3287 // timeBeginPeriod() if the relative error exceeded some threshold.
3284 3288 // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
3285 3289 // to decreased efficiency related to increased timer "tick" rates. We want to minimize
3286 3290 // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
3287 3291 // resolution timers running.
3288 3292 private:
3289 3293 jlong resolution;
3290 3294 public:
3291 3295 HighResolutionInterval(jlong ms) {
3292 3296 resolution = ms % 10L;
3293 3297 if (resolution != 0) {
3294 3298 MMRESULT result = timeBeginPeriod(1L);
3295 3299 }
3296 3300 }
3297 3301 ~HighResolutionInterval() {
3298 3302 if (resolution != 0) {
3299 3303 MMRESULT result = timeEndPeriod(1L);
3300 3304 }
3301 3305 resolution = 0L;
3302 3306 }
3303 3307 };
3304 3308
// Sleep for 'ms' milliseconds.  Interruptible sleeps (Java threads only)
// wait on the OSThread's interrupt event and return OS_INTRPT when it is
// signaled; otherwise OS_TIMEOUT is returned after the interval elapses.
int os::sleep(Thread* thread, jlong ms, bool interruptable) {
  jlong limit = (jlong) MAXDWORD;

  // The Win32 wait primitives take a DWORD timeout, so chop very large
  // intervals into MAXDWORD-sized chunks.
  while(ms > limit) {
    int res;
    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT)
      return res;
    ms -= limit;
  }

  assert(thread == Thread::current(), "thread consistency check");
  OSThread* osthread = thread->osthread();
  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
  int result;
  if (interruptable) {
    assert(thread->is_Java_thread(), "must be java thread");
    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    HANDLE events[1];
    events[0] = osthread->interrupt_event();
    // Optionally raise the timer resolution for the duration of the wait.
    HighResolutionInterval *phri=NULL;
    if(!ForceTimeHighResolution)
      phri = new HighResolutionInterval( ms );
    // A signaled interrupt event means we were interrupted, not timed out.
    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
      result = OS_TIMEOUT;
    } else {
      // Consume the interrupt: reset the event and clear the flag before
      // reporting OS_INTRPT.
      ResetEvent(osthread->interrupt_event());
      osthread->set_interrupted(false);
      result = OS_INTRPT;
    }
    delete phri; //if it is NULL, harmless

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();
  } else {
    // Non-Java threads get a plain, non-interruptible Sleep().
    assert(!thread->is_Java_thread(), "must not be java thread");
    Sleep((long) ms);
    result = OS_TIMEOUT;
  }
  return result;
}
3351 3355
3352 3356 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3353 3357 void os::infinite_sleep() {
3354 3358 while (true) { // sleep forever ...
3355 3359 Sleep(100000); // ... 100 seconds at a time
3356 3360 }
3357 3361 }
3358 3362
3359 3363 typedef BOOL (WINAPI * STTSignature)(void) ;
3360 3364
3361 3365 os::YieldResult os::NakedYield() {
3362 3366 // Use either SwitchToThread() or Sleep(0)
3363 3367 // Consider passing back the return value from SwitchToThread().
3364 3368 if (os::Kernel32Dll::SwitchToThreadAvailable()) {
3365 3369 return SwitchToThread() ? os::YIELD_SWITCHED : os::YIELD_NONEREADY ;
3366 3370 } else {
3367 3371 Sleep(0);
3368 3372 }
3369 3373 return os::YIELD_UNKNOWN ;
3370 3374 }
3371 3375
// Plain yield delegates to the naked OS-level yield.
void os::yield() {  os::NakedYield(); }

// 'attempts' is ignored on Windows; Sleep(1) yields to all threads,
// including those with lower priorities.
void os::yield_all(int attempts) {
  // Yields to all threads, including threads with lower priorities
  Sleep(1);
}
3378 3382
// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven.  It would be better
// if we dynamically adjusted relative priorities.
// NOTE: prio_init() may replace these entries with prio_policy1[]
// when ThreadPriorityPolicy == 1.

int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_NORMAL,                       // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
};
3397 3401
// Alternate priority table installed by prio_init() when
// ThreadPriorityPolicy == 1: spreads the range wider and maps the top
// priorities to TIME_CRITICAL.
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_HIGHEST,                      // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
};
3412 3416
3413 3417 static int prio_init() {
3414 3418 // If ThreadPriorityPolicy is 1, switch tables
3415 3419 if (ThreadPriorityPolicy == 1) {
3416 3420 int i;
3417 3421 for (i = 0; i < CriticalPriority + 1; i++) {
3418 3422 os::java_to_os_priority[i] = prio_policy1[i];
3419 3423 }
3420 3424 }
3421 3425 if (UseCriticalJavaThreadPriority) {
3422 3426 os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority] ;
3423 3427 }
3424 3428 return 0;
3425 3429 }
3426 3430
3427 3431 OSReturn os::set_native_priority(Thread* thread, int priority) {
3428 3432 if (!UseThreadPriorities) return OS_OK;
3429 3433 bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3430 3434 return ret ? OS_OK : OS_ERR;
3431 3435 }
3432 3436
3433 3437 OSReturn os::get_native_priority(const Thread* const thread, int* priority_ptr) {
3434 3438 if ( !UseThreadPriorities ) {
3435 3439 *priority_ptr = java_to_os_priority[NormPriority];
3436 3440 return OS_OK;
3437 3441 }
3438 3442 int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3439 3443 if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3440 3444 assert(false, "GetThreadPriority failed");
3441 3445 return OS_ERR;
3442 3446 }
3443 3447 *priority_ptr = os_prio;
3444 3448 return OS_OK;
3445 3449 }
3446 3450
3447 3451
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
// No-op on Windows.
void os::hint_no_preempt() {}
3451 3455
// Interrupt 'thread': set its interrupted flag, signal its interrupt event,
// and unpark it so any interruptible wait returns promptly.
void os::interrupt(Thread* thread) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  osthread->set_interrupted(true);
  // More than one thread can get here with the same value of osthread,
  // resulting in multiple notifications.  We do, however, want the store
  // to interrupted() to be visible to other threads before we post
  // the interrupt event.
  OrderAccess::release();
  SetEvent(osthread->interrupt_event());
  // For JSR166:  unpark after setting status
  if (thread->is_Java_thread())
    ((JavaThread*)thread)->parker()->unpark();

  // Wake the thread's ParkEvent, if it has one.
  ParkEvent * ev = thread->_ParkEvent ;
  if (ev != NULL) ev->unpark() ;

}
3472 3476
3473 3477
// Report whether 'thread' has a pending interrupt; optionally clear it.
// The interrupt state and event are only reset when we actually report
// an interrupt (see the comment below).
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  bool interrupted = osthread->interrupted();
  // There is no synchronization between the setting of the interrupt
  // and it being cleared here. It is critical - see 6535709 - that
  // we only clear the interrupt state, and reset the interrupt event,
  // if we are going to report that we were indeed interrupted - else
  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
  // depending on the timing
  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    ResetEvent(osthread->interrupt_event());
  } // Otherwise leave the interrupted state alone

  return interrupted;
}
3493 3497
// Gets a pc (hint) for a running thread. Currently used only for profiling.
ExtendedPC os::get_thread_pc(Thread* thread) {
  CONTEXT context;
  context.ContextFlags = CONTEXT_CONTROL;  // only the control registers are needed
  HANDLE handle = thread->osthread()->thread_handle();
#ifdef _M_IA64
  // Not implemented on Itanium.
  assert(0, "Fix get_thread_pc");
  return ExtendedPC(NULL);
#else
  if (GetThreadContext(handle, &context)) {
#ifdef _M_AMD64
    return ExtendedPC((address) context.Rip);
#else
    return ExtendedPC((address) context.Eip);
#endif
  } else {
    // Could not read the thread context; report no pc.
    return ExtendedPC(NULL);
  }
#endif
}
3514 3518
// GetCurrentThreadId() returns DWORD
intx os::current_thread_id()  { return GetCurrentThreadId(); }

// Captured in os::init() via _getpid(); 0 before initialization.
static int _initial_pid = 0;

// Prefer the pid cached at startup; fall back to _getpid() if os::init()
// has not run yet.
int os::current_process_id()
{
  return (_initial_pid ? _initial_pid : _getpid());
}
3524 3528
// Cached system characteristics, filled in by initialize_system_info().
int os::win32::_vm_page_size = 0;
int os::win32::_vm_allocation_granularity = 0;
int os::win32::_processor_type = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int os::win32::_processor_level = 0;
julong os::win32::_physical_memory = 0;
size_t os::win32::_default_stack_size = 0;

// Thread accounting used for virtual-memory exhaustion heuristics.
intx os::win32::_os_thread_limit = 0;
volatile intx os::win32::_os_thread_count = 0;

// OS flavor flags, also set by initialize_system_info().
bool os::win32::_is_nt = false;
bool os::win32::_is_windows_2003 = false;
bool os::win32::_is_windows_server = false;
3539 3543
// Probe the host system once at startup and cache page size, allocation
// granularity, processor data, physical memory and OS flavor in the
// os::win32 statics above.
void os::win32::initialize_system_info() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  _vm_page_size = si.dwPageSize;
  _vm_allocation_granularity = si.dwAllocationGranularity;
  _processor_type = si.dwProcessorType;
  _processor_level = si.wProcessorLevel;
  set_processor_count(si.dwNumberOfProcessors);

  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);

  // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
  // dwMemoryLoad (% of memory in use)
  GlobalMemoryStatusEx(&ms);
  _physical_memory = ms.ullTotalPhys;

  // Classify the OS: NT-family vs Win9x, and detect Windows 2003 /
  // server editions for ergonomics decisions elsewhere.
  OSVERSIONINFOEX oi;
  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  GetVersionEx((OSVERSIONINFO*)&oi);
  switch(oi.dwPlatformId) {
    case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
    case VER_PLATFORM_WIN32_NT:
      _is_nt = true;
      {
        int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
        if (os_vers == 5002) {
          _is_windows_2003 = true;
        }
        if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
            oi.wProductType == VER_NT_SERVER) {
          _is_windows_server = true;
        }
      }
      break;
    default: fatal("Unknown platform");
  }

  _default_stack_size = os::current_stack_size();
  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
  assert((_default_stack_size & (_vm_page_size - 1)) == 0,
         "stack size not a multiple of page size");

  initialize_performance_counter();

  // Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is
  // known to deadlock the system, if the VM issues to thread operations with
  // a too high frequency, e.g., such as changing the priorities.
  // The 6000 seems to work well - no deadlocks have been noticed on the test
  // programs that we have seen experience this problem.
  if (!os::win32::is_nt()) {
    StarvationMonitorInterval = 6000;
  }
}
3594 3598
3595 3599
3596 3600 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf, int ebuflen) {
3597 3601 char path[MAX_PATH];
3598 3602 DWORD size;
3599 3603 DWORD pathLen = (DWORD)sizeof(path);
3600 3604 HINSTANCE result = NULL;
3601 3605
3602 3606 // only allow library name without path component
3603 3607 assert(strchr(name, '\\') == NULL, "path not allowed");
3604 3608 assert(strchr(name, ':') == NULL, "path not allowed");
3605 3609 if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3606 3610 jio_snprintf(ebuf, ebuflen,
3607 3611 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3608 3612 return NULL;
3609 3613 }
3610 3614
3611 3615 // search system directory
3612 3616 if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3613 3617 strcat(path, "\\");
3614 3618 strcat(path, name);
3615 3619 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3616 3620 return result;
3617 3621 }
3618 3622 }
3619 3623
3620 3624 // try Windows directory
3621 3625 if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3622 3626 strcat(path, "\\");
3623 3627 strcat(path, name);
3624 3628 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3625 3629 return result;
3626 3630 }
3627 3631 }
3628 3632
3629 3633 jio_snprintf(ebuf, ebuflen,
3630 3634 "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3631 3635 return NULL;
3632 3636 }
3633 3637
3634 3638 void os::win32::setmode_streams() {
3635 3639 _setmode(_fileno(stdin), _O_BINARY);
3636 3640 _setmode(_fileno(stdout), _O_BINARY);
3637 3641 _setmode(_fileno(stderr), _O_BINARY);
3638 3642 }
3639 3643
3640 3644
3641 3645 bool os::is_debugger_attached() {
3642 3646 return IsDebuggerPresent() ? true : false;
3643 3647 }
3644 3648
3645 3649
3646 3650 void os::wait_for_keypress_at_exit(void) {
3647 3651 if (PauseAtExit) {
3648 3652 fprintf(stderr, "Press any key to continue...\n");
3649 3653 fgetc(stdin);
3650 3654 }
3651 3655 }
3652 3656
3653 3657
3654 3658 int os::message_box(const char* title, const char* message) {
3655 3659 int result = MessageBox(NULL, message, title,
3656 3660 MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
3657 3661 return result == IDYES;
3658 3662 }
3659 3663
// Thread-local storage: thin wrappers over the Win32 TLS API.

// Allocate a TLS slot; returns its index.
int os::allocate_thread_local_storage() {
  return TlsAlloc();
}


// Release a previously allocated TLS slot.
void os::free_thread_local_storage(int index) {
  TlsFree(index);
}


// Store 'value' in the calling thread's slot 'index'.
void os::thread_local_storage_at_put(int index, void* value) {
  TlsSetValue(index, value);
  assert(thread_local_storage_at(index) == value, "Just checking");
}


// Read the calling thread's slot 'index'.
void* os::thread_local_storage_at(int index) {
  return TlsGetValue(index);
}
3679 3683
3680 3684
#ifndef PRODUCT
#ifndef _WIN64
// Helpers to check whether NX protection is enabled
// SEH filter: handle only access violations caused by an attempted
// instruction fetch (EXCEPTION_INFO_EXEC_VIOLATION).
int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
  if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      pex->ExceptionRecord->NumberParameters > 0 &&
      pex->ExceptionRecord->ExceptionInformation[0] ==
      EXCEPTION_INFO_EXEC_VIOLATION) {
    return EXCEPTION_EXECUTE_HANDLER;
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Try to execute a one-byte 'ret' placed on the stack; if NX is active
// this faults and the filter above catches it.
void nx_check_protection() {
  // If NX is enabled we'll get an exception calling into code on the stack
  char code[] = { (char)0xC3 }; // ret
  void *code_ptr = (void *)code;
  __try {
    __asm call code_ptr
  } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
    tty->print_raw_cr("NX protection detected.");
  }
}
#endif // _WIN64
#endif // PRODUCT
3706 3710
// this is called _before_ the global arguments have been parsed
void os::init(void) {
  // Cache the pid so current_process_id() stays stable for the VM's lifetime.
  _initial_pid = _getpid();

  init_random(1234567);

  win32::initialize_system_info();
  win32::setmode_streams();
  init_page_sizes((size_t) win32::vm_page_size());

  // For better scalability on MP systems (must be called after initialize_system_info)
#ifndef PRODUCT
  if (is_MP()) {
    NoYieldsInMicrolock = true;
  }
#endif
  // This may be overridden later when argument processing is done.
  FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation,
                os::win32::is_windows_2003());

  // Initialize main_process and main_thread
  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
  // Duplicate the pseudo thread handle into a real handle usable from
  // other threads.
  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  main_thread_id = (int) GetCurrentThreadId();
}
3735 3739
// To install functions for atexit processing
// C-linkage wrapper so perfMemory_exit() can be registered with atexit().
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
3742 3746
// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
  // Allocate a single page and mark it as readable for safepoint polling
  address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
  guarantee( polling_page != NULL, "Reserve Failed for polling page");

  address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
  guarantee( return_page != NULL, "Commit Failed for polling page");

  os::set_polling_page( polling_page );

#ifndef PRODUCT
  if( Verbose && PrintMiscellaneous )
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif

  if (!UseMembar) {
    // Reserve and commit the writable page published via
    // os::set_memory_serialize_page() when explicit membars are disabled.
    address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
    guarantee( mem_serialize_page != NULL, "Reserve Failed for memory serialize page");

    return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
    guarantee( return_page != NULL, "Commit Failed for memory serialize page");

    os::set_memory_serialize_page( mem_serialize_page );

#ifndef PRODUCT
    if(Verbose && PrintMiscellaneous)
      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
  }

  os::large_page_init();

  // Setup Windows Exceptions

  // On Itanium systems, Structured Exception Handling does not
  // work since stack frames must be walkable by the OS.  Since
  // much of our code is dynamically generated, and we do not have
  // proper unwind .xdata sections, the system simply exits
  // rather than delivering the exception.  To work around
  // this we use VectorExceptions instead.
#ifdef _WIN64
  if (UseVectoredExceptions) {
    topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelExceptionFilter);
  }
#endif

  // for debugging float code generation bugs
  if (ForceFloatExceptions) {
#ifndef _WIN64
    static long fp_control_word = 0;
    __asm { fstcw fp_control_word }
    // see Intel PPro Manual, Vol. 2, p 7-16
    const long precision = 0x20;
    const long underflow = 0x10;
    const long overflow = 0x08;
    const long zero_div = 0x04;
    const long denorm = 0x02;
    const long invalid = 0x01;
    fp_control_word |= invalid;
    __asm { fldcw fp_control_word }
#endif
  }

  // If stack_commit_size is 0, windows will reserve the default size,
  // but only commit a small portion of it.
  size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
  size_t default_reserve_size = os::win32::default_stack_size();
  size_t actual_reserve_size = stack_commit_size;
  if (stack_commit_size < default_reserve_size) {
    // If stack_commit_size == 0, we want this too
    actual_reserve_size = default_reserve_size;
  }

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size.  Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  size_t min_stack_allowed =
            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
            2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size();
  if (actual_reserve_size < min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small, "
                  "Specify at least %dk",
                  min_stack_allowed / K);
    return JNI_ERR;
  }

  JavaThread::set_stack_size_at_create(stack_commit_size);

  // Calculate theoretical max. size of Threads to guard against artificial
  // out-of-memory situations, where all available address-space has been
  // reserved by thread stacks.
  assert(actual_reserve_size != 0, "Must have a stack");

  // Calculate the thread limit when we should start doing Virtual Memory
  // banging. Currently when the threads will have used all but 200Mb of space.
  //
  // TODO: consider performing a similar calculation for commit size instead
  // as reserve size, since on a 64-bit platform we'll run into that more
  // often than running out of virtual memory space.  We can use the
  // lower value of the two calculations as the os_thread_limit.
  size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
  win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);

  // at exit methods are called in the reverse order of their registration.
  // there is no limit to the number of functions registered. atexit does
  // not set errno.

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

#ifndef _WIN64
  // Print something if NX is enabled (win32 on AMD64)
  NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
#endif

  // initialize thread priority policy
  prio_init();

  if (UseNUMA && !ForceNUMA) {
    UseNUMA = false; // We don't fully support this yet
  }

  if (UseNUMAInterleaving) {
    // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
    bool success = numa_interleaving_init();
    if (!success) UseNUMAInterleaving = false;
  }

  return JNI_OK;
}
3887 3891
3888 3892 void os::init_3(void) {
3889 3893 return;
3890 3894 }
3891 3895
3892 3896 // Mark the polling page as unreadable
3893 3897 void os::make_polling_page_unreadable(void) {
3894 3898 DWORD old_status;
3895 3899 if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_NOACCESS, &old_status) )
3896 3900 fatal("Could not disable polling page");
3897 3901 };
3898 3902
3899 3903 // Mark the polling page as readable
3900 3904 void os::make_polling_page_readable(void) {
3901 3905 DWORD old_status;
3902 3906 if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_READONLY, &old_status) )
3903 3907 fatal("Could not enable polling page");
3904 3908 };
3905 3909
3906 3910
// stat() wrapper: rejects over-long paths, converts to the native path
// form, and (when UseUTCFileTimestamp is set) normalizes st_mtime so it
// is independent of the system timezone -- see the 6539723 note below.
int os::stat(const char *path, struct stat *sbuf) {
  char pathbuf[MAX_PATH];
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  os::native_path(strcpy(pathbuf, path));
  int ret = ::stat(pathbuf, sbuf);
  if (sbuf != NULL && UseUTCFileTimestamp) {
    // Fix for 6539723.  st_mtime returned from stat() is dependent on
    // the system timezone and so can return different values for the
    // same file if/when daylight savings time changes.  This adjustment
    // makes sure the same timestamp is returned regardless of the TZ.
    //
    // See:
    // http://msdn.microsoft.com/library/
    //    default.asp?url=/library/en-us/sysinfo/base/
    //    time_zone_information_str.asp
    // and
    // http://msdn.microsoft.com/library/default.asp?url=
    //  /library/en-us/sysinfo/base/settimezoneinformation.asp
    //
    // NOTE: there is an insidious bug here:  If the timezone is changed
    // after the call to stat() but before 'GetTimeZoneInformation()', then
    // the adjustment we do here will be wrong and we'll return the wrong
    // value (which will likely end up creating an invalid class data
    // archive).  Absent a better API for this, or some time zone locking
    // mechanism, we'll have to live with this risk.
    TIME_ZONE_INFORMATION tz;
    DWORD tzid = GetTimeZoneInformation(&tz);
    int daylightBias =
      (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias;
    // Bias values are in minutes; convert to seconds before adjusting.
    sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
  }
  return ret;
}
3943 3947
3944 3948
// Assemble a Windows FILETIME (two 32-bit halves) into a single jlong.
#define FT2INT64(ft) \
  ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))


// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
// the fast estimate available on the platform.
// current_thread_cpu_time() is not optimized for Windows yet
jlong os::current_thread_cpu_time() {
  // return user + sys since the cost is the same
  return os::thread_cpu_time(Thread::current(), true /* user+sys */);
}

jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns.
  return os::thread_cpu_time(thread, true /* user+sys */);
}

// Convenience overload for the calling thread.
jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
}
3970 3974
3971 3975 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
3972 3976 // This code is copy from clasic VM -> hpi::sysThreadCPUTime
3973 3977 // If this function changes, os::is_thread_cpu_time_supported() should too
3974 3978 if (os::win32::is_nt()) {
3975 3979 FILETIME CreationTime;
3976 3980 FILETIME ExitTime;
3977 3981 FILETIME KernelTime;
3978 3982 FILETIME UserTime;
3979 3983
3980 3984 if ( GetThreadTimes(thread->osthread()->thread_handle(),
3981 3985 &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
3982 3986 return -1;
3983 3987 else
3984 3988 if (user_sys_cpu_time) {
3985 3989 return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
3986 3990 } else {
3987 3991 return FT2INT64(UserTime) * 100;
3988 3992 }
3989 3993 } else {
3990 3994 return (jlong) timeGetTime() * 1000000;
3991 3995 }
3992 3996 }
3993 3997
// Describe, for JVMTI, the timer backing current_thread_cpu_time().
void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
}

// Describe, for JVMTI, the timer backing thread_cpu_time().
void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
}
4007 4011
4008 4012 bool os::is_thread_cpu_time_supported() {
4009 4013 // see os::thread_cpu_time
4010 4014 if (os::win32::is_nt()) {
4011 4015 FILETIME CreationTime;
4012 4016 FILETIME ExitTime;
4013 4017 FILETIME KernelTime;
4014 4018 FILETIME UserTime;
4015 4019
4016 4020 if ( GetThreadTimes(GetCurrentThread(),
4017 4021 &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
4018 4022 return false;
4019 4023 else
4020 4024 return true;
4021 4025 } else {
4022 4026 return false;
4023 4027 }
4024 4028 }
4025 4029
4026 4030 // Windows doesn't provide a loadavg primitive so this is stubbed out for now.
4027 4031 // It does have primitives (PDH API) to get CPU usage and run queue length.
4028 4032 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4029 4033 // If we wanted to implement loadavg on Windows, we have a few options:
4030 4034 //
4031 4035 // a) Query CPU usage and run queue length and "fake" an answer by
4032 4036 // returning the CPU usage if it's under 100%, and the run queue
4033 4037 // length otherwise. It turns out that querying is pretty slow
4034 4038 // on Windows, on the order of 200 microseconds on a fast machine.
4035 4039 // Note that on the Windows the CPU usage value is the % usage
4036 4040 // since the last time the API was called (and the first call
4037 4041 // returns 100%), so we'd have to deal with that as well.
4038 4042 //
4039 4043 // b) Sample the "fake" answer using a sampling thread and store
4040 4044 // the answer in a global variable. The call to loadavg would
4041 4045 // just return the value of the global, avoiding the slow query.
4042 4046 //
4043 4047 // c) Sample a better answer using exponential decay to smooth the
4044 4048 // value. This is basically the algorithm used by UNIX kernels.
4045 4049 //
4046 4050 // Note that sampling thread starvation could affect both (b) and (c).
int os::loadavg(double loadavg[], int nelem) {
  // Not implemented on Windows (see the discussion above); -1 tells
  // callers that no load-average figures are available.
  return -1;
}
4050 4054
4051 4055
// DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
bool os::dont_yield() {
  // Simply reports the DontYieldALot VM flag; callers consult this to
  // decide whether a requested yield should be skipped.
  return DontYieldALot;
}
4056 4060
4057 4061 // This method is a slightly reworked copy of JDK's sysOpen
4058 4062 // from src/windows/hpi/src/sys_api_md.c
4059 4063
4060 4064 int os::open(const char *path, int oflag, int mode) {
4061 4065 char pathbuf[MAX_PATH];
4062 4066
4063 4067 if (strlen(path) > MAX_PATH - 1) {
4064 4068 errno = ENAMETOOLONG;
4065 4069 return -1;
4066 4070 }
4067 4071 os::native_path(strcpy(pathbuf, path));
4068 4072 return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
4069 4073 }
4070 4074
4071 4075 // Is a (classpath) directory empty?
4072 4076 bool os::dir_is_empty(const char* path) {
4073 4077 WIN32_FIND_DATA fd;
4074 4078 HANDLE f = FindFirstFile(path, &fd);
4075 4079 if (f == INVALID_HANDLE_VALUE) {
4076 4080 return true;
4077 4081 }
4078 4082 FindClose(f);
4079 4083 return false;
4080 4084 }
4081 4085
4082 4086 // create binary file, rewriting existing file if required
4083 4087 int os::create_binary_file(const char* path, bool rewrite_existing) {
4084 4088 int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4085 4089 if (!rewrite_existing) {
4086 4090 oflags |= _O_EXCL;
4087 4091 }
4088 4092 return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4089 4093 }
4090 4094
// return current position of file pointer
jlong os::current_file_offset(int fd) {
  // Seeking 0 bytes from SEEK_CUR reports the current offset without
  // moving the file pointer.
  return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
}
4095 4099
// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  // Returns the resulting offset, or -1 on failure (per _lseeki64).
  return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
}
4100 4104
4101 4105
// General seek wrapper: reposition 'fd' by 'offset' relative to 'whence'
// (SEEK_SET / SEEK_CUR / SEEK_END), returning the new offset or -1.
jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::_lseeki64(fd, offset, whence);
}
4105 4109
4106 4110 // This method is a slightly reworked copy of JDK's sysNativePath
4107 4111 // from src/windows/hpi/src/path_md.c
4108 4112
4109 4113 /* Convert a pathname to native format. On win32, this involves forcing all
4110 4114 separators to be '\\' rather than '/' (both are legal inputs, but Win95
4111 4115 sometimes rejects '/') and removing redundant separators. The input path is
4112 4116 assumed to have been converted into the character encoding used by the local
4113 4117 system. Because this might be a double-byte encoding, care is taken to
4114 4118 treat double-byte lead characters correctly.
4115 4119
4116 4120 This procedure modifies the given path in place, as the result is never
4117 4121 longer than the original. There is no error return; this operation always
4118 4122 succeeds. */
4119 4123 char * os::native_path(char *path) {
4120 4124 char *src = path, *dst = path, *end = path;
4121 4125 char *colon = NULL; /* If a drive specifier is found, this will
4122 4126 point to the colon following the drive
4123 4127 letter */
4124 4128
4125 4129 /* Assumption: '/', '\\', ':', and drive letters are never lead bytes */
4126 4130 assert(((!::IsDBCSLeadByte('/'))
4127 4131 && (!::IsDBCSLeadByte('\\'))
4128 4132 && (!::IsDBCSLeadByte(':'))),
4129 4133 "Illegal lead byte");
4130 4134
4131 4135 /* Check for leading separators */
4132 4136 #define isfilesep(c) ((c) == '/' || (c) == '\\')
4133 4137 while (isfilesep(*src)) {
4134 4138 src++;
4135 4139 }
4136 4140
4137 4141 if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4138 4142 /* Remove leading separators if followed by drive specifier. This
4139 4143 hack is necessary to support file URLs containing drive
4140 4144 specifiers (e.g., "file://c:/path"). As a side effect,
4141 4145 "/c:/path" can be used as an alternative to "c:/path". */
4142 4146 *dst++ = *src++;
4143 4147 colon = dst;
4144 4148 *dst++ = ':';
4145 4149 src++;
4146 4150 } else {
4147 4151 src = path;
4148 4152 if (isfilesep(src[0]) && isfilesep(src[1])) {
4149 4153 /* UNC pathname: Retain first separator; leave src pointed at
4150 4154 second separator so that further separators will be collapsed
4151 4155 into the second separator. The result will be a pathname
4152 4156 beginning with "\\\\" followed (most likely) by a host name. */
4153 4157 src = dst = path + 1;
4154 4158 path[0] = '\\'; /* Force first separator to '\\' */
4155 4159 }
4156 4160 }
4157 4161
4158 4162 end = dst;
4159 4163
4160 4164 /* Remove redundant separators from remainder of path, forcing all
4161 4165 separators to be '\\' rather than '/'. Also, single byte space
4162 4166 characters are removed from the end of the path because those
4163 4167 are not legal ending characters on this operating system.
4164 4168 */
4165 4169 while (*src != '\0') {
4166 4170 if (isfilesep(*src)) {
4167 4171 *dst++ = '\\'; src++;
4168 4172 while (isfilesep(*src)) src++;
4169 4173 if (*src == '\0') {
4170 4174 /* Check for trailing separator */
4171 4175 end = dst;
4172 4176 if (colon == dst - 2) break; /* "z:\\" */
4173 4177 if (dst == path + 1) break; /* "\\" */
4174 4178 if (dst == path + 2 && isfilesep(path[0])) {
4175 4179 /* "\\\\" is not collapsed to "\\" because "\\\\" marks the
4176 4180 beginning of a UNC pathname. Even though it is not, by
4177 4181 itself, a valid UNC pathname, we leave it as is in order
4178 4182 to be consistent with the path canonicalizer as well
4179 4183 as the win32 APIs, which treat this case as an invalid
4180 4184 UNC pathname rather than as an alias for the root
4181 4185 directory of the current drive. */
4182 4186 break;
4183 4187 }
4184 4188 end = --dst; /* Path does not denote a root directory, so
4185 4189 remove trailing separator */
4186 4190 break;
4187 4191 }
4188 4192 end = dst;
4189 4193 } else {
4190 4194 if (::IsDBCSLeadByte(*src)) { /* Copy a double-byte character */
4191 4195 *dst++ = *src++;
4192 4196 if (*src) *dst++ = *src++;
4193 4197 end = dst;
4194 4198 } else { /* Copy a single-byte character */
4195 4199 char c = *src++;
4196 4200 *dst++ = c;
4197 4201 /* Space is not a legal ending character */
4198 4202 if (c != ' ') end = dst;
4199 4203 }
4200 4204 }
4201 4205 }
4202 4206
4203 4207 *end = '\0';
4204 4208
4205 4209 /* For "z:", add "." to work around a bug in the C runtime library */
4206 4210 if (colon == dst - 1) {
4207 4211 path[2] = '.';
4208 4212 path[3] = '\0';
4209 4213 }
4210 4214
4211 4215 #ifdef DEBUG
4212 4216 jio_fprintf(stderr, "sysNativePath: %s\n", path);
4213 4217 #endif DEBUG
4214 4218 return path;
4215 4219 }
4216 4220
// This code is a copy of JDK's sysSetLength
// from src/windows/hpi/src/sys_api_md.c

// Truncate (or extend) the file underlying 'fd' to exactly 'length' bytes.
// Returns 0 on success, -1 on failure.
int os::ftruncate(int fd, jlong length) {
  HANDLE h = (HANDLE)::_get_osfhandle(fd);
  long high = (long)(length >> 32);   // upper 32 bits of the 64-bit offset
  DWORD ret;

  if (h == (HANDLE)(-1)) {
    // fd is not backed by a valid OS handle.
    return -1;
  }

  // Position the file pointer at 'length'. 0xFFFFFFFF can be a legitimate
  // low word of a 64-bit offset, so it signals failure only when
  // GetLastError() also reports an error.
  ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
  if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
    return -1;
  }

  // Truncate/extend the file at the current file pointer.
  if (::SetEndOfFile(h) == FALSE) {
    return -1;
  }

  return 0;
}
4240 4244
4241 4245
4242 4246 // This code is a copy of JDK's sysSync
4243 4247 // from src/windows/hpi/src/sys_api_md.c
4244 4248 // except for the legacy workaround for a bug in Win 98
4245 4249
4246 4250 int os::fsync(int fd) {
4247 4251 HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4248 4252
4249 4253 if ( (!::FlushFileBuffers(handle)) &&
4250 4254 (GetLastError() != ERROR_ACCESS_DENIED) ) {
4251 4255 /* from winerror.h */
4252 4256 return -1;
4253 4257 }
4254 4258 return 0;
4255 4259 }
4256 4260
// Helpers (defined below) that report the number of readable bytes on
// non-seekable descriptors; both return TRUE/FALSE and store the count
// through the long* out-parameter.
static int nonSeekAvailable(int, long *);
static int stdinAvailable(int, long *);

// st_mode classification macros (character device / FIFO) for _stati64,
// which the MSVC headers do not provide.
#define S_ISCHR(mode) (((mode) & _S_IFCHR) == _S_IFCHR)
#define S_ISFIFO(mode) (((mode) & _S_IFIFO) == _S_IFIFO)
4262 4266
// This code is a copy of JDK's sysAvailable
// from src/windows/hpi/src/sys_api_md.c

// Store in *bytes the number of bytes readable from 'fd' without blocking.
// Returns TRUE on success, FALSE on failure.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  struct _stati64 stbuf64;

  if (::_fstati64(fd, &stbuf64) >= 0) {
    int mode = stbuf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode)) {
      // Character devices and pipes are not seekable; fd 0 (stdin) may be
      // an interactive console and needs special handling.
      int ret;
      long lpbytes;
      if (fd == 0) {
        ret = stdinAvailable(fd, &lpbytes);
      } else {
        ret = nonSeekAvailable(fd, &lpbytes);
      }
      (*bytes) = (jlong)(lpbytes);
      return ret;
    }
    // Seekable file: available = size - current position, computed by
    // seeking to the end and then restoring the original offset.
    if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
      return FALSE;
    } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
      return FALSE;
    } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
      return FALSE;
    }
    *bytes = end - cur;
    return TRUE;
  } else {
    return FALSE;
  }
}
4296 4300
4297 4301 // This code is a copy of JDK's nonSeekAvailable
4298 4302 // from src/windows/hpi/src/sys_api_md.c
4299 4303
4300 4304 static int nonSeekAvailable(int fd, long *pbytes) {
4301 4305 /* This is used for available on non-seekable devices
4302 4306 * (like both named and anonymous pipes, such as pipes
4303 4307 * connected to an exec'd process).
4304 4308 * Standard Input is a special case.
4305 4309 *
4306 4310 */
4307 4311 HANDLE han;
4308 4312
4309 4313 if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4310 4314 return FALSE;
4311 4315 }
4312 4316
4313 4317 if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4314 4318 /* PeekNamedPipe fails when at EOF. In that case we
4315 4319 * simply make *pbytes = 0 which is consistent with the
4316 4320 * behavior we get on Solaris when an fd is at EOF.
4317 4321 * The only alternative is to raise an Exception,
4318 4322 * which isn't really warranted.
4319 4323 */
4320 4324 if (::GetLastError() != ERROR_BROKEN_PIPE) {
4321 4325 return FALSE;
4322 4326 }
4323 4327 *pbytes = 0;
4324 4328 }
4325 4329 return TRUE;
4326 4330 }
4327 4331
4328 4332 #define MAX_INPUT_EVENTS 2000
4329 4333
4330 4334 // This code is a copy of JDK's stdinAvailable
4331 4335 // from src/windows/hpi/src/sys_api_md.c
4332 4336
4333 4337 static int stdinAvailable(int fd, long *pbytes) {
4334 4338 HANDLE han;
4335 4339 DWORD numEventsRead = 0; /* Number of events read from buffer */
4336 4340 DWORD numEvents = 0; /* Number of events in buffer */
4337 4341 DWORD i = 0; /* Loop index */
4338 4342 DWORD curLength = 0; /* Position marker */
4339 4343 DWORD actualLength = 0; /* Number of bytes readable */
4340 4344 BOOL error = FALSE; /* Error holder */
4341 4345 INPUT_RECORD *lpBuffer; /* Pointer to records of input events */
4342 4346
4343 4347 if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4344 4348 return FALSE;
4345 4349 }
4346 4350
4347 4351 /* Construct an array of input records in the console buffer */
4348 4352 error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4349 4353 if (error == 0) {
4350 4354 return nonSeekAvailable(fd, pbytes);
4351 4355 }
4352 4356
4353 4357 /* lpBuffer must fit into 64K or else PeekConsoleInput fails */
4354 4358 if (numEvents > MAX_INPUT_EVENTS) {
4355 4359 numEvents = MAX_INPUT_EVENTS;
4356 4360 }
4357 4361
4358 4362 lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4359 4363 if (lpBuffer == NULL) {
4360 4364 return FALSE;
4361 4365 }
4362 4366
4363 4367 error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4364 4368 if (error == 0) {
4365 4369 os::free(lpBuffer, mtInternal);
4366 4370 return FALSE;
4367 4371 }
4368 4372
4369 4373 /* Examine input records for the number of bytes available */
4370 4374 for(i=0; i<numEvents; i++) {
4371 4375 if (lpBuffer[i].EventType == KEY_EVENT) {
4372 4376
4373 4377 KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4374 4378 &(lpBuffer[i].Event);
4375 4379 if (keyRecord->bKeyDown == TRUE) {
4376 4380 CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4377 4381 curLength++;
4378 4382 if (*keyPressed == '\r') {
4379 4383 actualLength = curLength;
4380 4384 }
4381 4385 }
4382 4386 }
4383 4387 }
4384 4388
4385 4389 if(lpBuffer != NULL) {
4386 4390 os::free(lpBuffer, mtInternal);
4387 4391 }
4388 4392
4389 4393 *pbytes = (long) actualLength;
4390 4394 return TRUE;
4391 4395 }
4392 4396
4393 4397 // Map a block of memory.
4394 4398 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4395 4399 char *addr, size_t bytes, bool read_only,
4396 4400 bool allow_exec) {
4397 4401 HANDLE hFile;
4398 4402 char* base;
4399 4403
4400 4404 hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4401 4405 OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4402 4406 if (hFile == NULL) {
4403 4407 if (PrintMiscellaneous && Verbose) {
4404 4408 DWORD err = GetLastError();
4405 4409 tty->print_cr("CreateFile() failed: GetLastError->%ld.");
4406 4410 }
4407 4411 return NULL;
4408 4412 }
4409 4413
4410 4414 if (allow_exec) {
4411 4415 // CreateFileMapping/MapViewOfFileEx can't map executable memory
4412 4416 // unless it comes from a PE image (which the shared archive is not.)
4413 4417 // Even VirtualProtect refuses to give execute access to mapped memory
4414 4418 // that was not previously executable.
4415 4419 //
4416 4420 // Instead, stick the executable region in anonymous memory. Yuck.
4417 4421 // Penalty is that ~4 pages will not be shareable - in the future
4418 4422 // we might consider DLLizing the shared archive with a proper PE
4419 4423 // header so that mapping executable + sharing is possible.
4420 4424
4421 4425 base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
4422 4426 PAGE_READWRITE);
4423 4427 if (base == NULL) {
4424 4428 if (PrintMiscellaneous && Verbose) {
4425 4429 DWORD err = GetLastError();
4426 4430 tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err);
4427 4431 }
4428 4432 CloseHandle(hFile);
4429 4433 return NULL;
4430 4434 }
4431 4435
4432 4436 DWORD bytes_read;
4433 4437 OVERLAPPED overlapped;
4434 4438 overlapped.Offset = (DWORD)file_offset;
4435 4439 overlapped.OffsetHigh = 0;
4436 4440 overlapped.hEvent = NULL;
4437 4441 // ReadFile guarantees that if the return value is true, the requested
4438 4442 // number of bytes were read before returning.
4439 4443 bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
4440 4444 if (!res) {
4441 4445 if (PrintMiscellaneous && Verbose) {
4442 4446 DWORD err = GetLastError();
4443 4447 tty->print_cr("ReadFile() failed: GetLastError->%ld.", err);
4444 4448 }
4445 4449 release_memory(base, bytes);
4446 4450 CloseHandle(hFile);
4447 4451 return NULL;
4448 4452 }
4449 4453 } else {
4450 4454 HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
4451 4455 NULL /*file_name*/);
4452 4456 if (hMap == NULL) {
4453 4457 if (PrintMiscellaneous && Verbose) {
4454 4458 DWORD err = GetLastError();
4455 4459 tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.");
4456 4460 }
4457 4461 CloseHandle(hFile);
4458 4462 return NULL;
4459 4463 }
4460 4464
4461 4465 DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
4462 4466 base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
4463 4467 (DWORD)bytes, addr);
4464 4468 if (base == NULL) {
4465 4469 if (PrintMiscellaneous && Verbose) {
4466 4470 DWORD err = GetLastError();
4467 4471 tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err);
4468 4472 }
4469 4473 CloseHandle(hMap);
4470 4474 CloseHandle(hFile);
4471 4475 return NULL;
4472 4476 }
4473 4477
4474 4478 if (CloseHandle(hMap) == 0) {
4475 4479 if (PrintMiscellaneous && Verbose) {
4476 4480 DWORD err = GetLastError();
4477 4481 tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err);
4478 4482 }
4479 4483 CloseHandle(hFile);
4480 4484 return base;
4481 4485 }
4482 4486 }
4483 4487
4484 4488 if (allow_exec) {
4485 4489 DWORD old_protect;
4486 4490 DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4487 4491 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4488 4492
4489 4493 if (!res) {
4490 4494 if (PrintMiscellaneous && Verbose) {
4491 4495 DWORD err = GetLastError();
4492 4496 tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err);
4493 4497 }
4494 4498 // Don't consider this a hard error, on IA32 even if the
4495 4499 // VirtualProtect fails, we should still be able to execute
4496 4500 CloseHandle(hFile);
4497 4501 return base;
4498 4502 }
4499 4503 }
4500 4504
4501 4505 if (CloseHandle(hFile) == 0) {
4502 4506 if (PrintMiscellaneous && Verbose) {
4503 4507 DWORD err = GetLastError();
4504 4508 tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err);
4505 4509 }
4506 4510 return base;
4507 4511 }
4508 4512
4509 4513 return base;
4510 4514 }
4511 4515
4512 4516
4513 4517 // Remap a block of memory.
4514 4518 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4515 4519 char *addr, size_t bytes, bool read_only,
4516 4520 bool allow_exec) {
4517 4521 // This OS does not allow existing memory maps to be remapped so we
4518 4522 // have to unmap the memory before we remap it.
4519 4523 if (!os::unmap_memory(addr, bytes)) {
4520 4524 return NULL;
4521 4525 }
4522 4526
4523 4527 // There is a very small theoretical window between the unmap_memory()
4524 4528 // call above and the map_memory() call below where a thread in native
4525 4529 // code may be able to access an address that is no longer mapped.
4526 4530
4527 4531 return os::map_memory(fd, file_name, file_offset, addr, bytes,
4528 4532 read_only, allow_exec);
4529 4533 }
4530 4534
4531 4535
4532 4536 // Unmap a block of memory.
4533 4537 // Returns true=success, otherwise false.
4534 4538
4535 4539 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4536 4540 BOOL result = UnmapViewOfFile(addr);
4537 4541 if (result == 0) {
4538 4542 if (PrintMiscellaneous && Verbose) {
4539 4543 DWORD err = GetLastError();
4540 4544 tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err);
4541 4545 }
4542 4546 return false;
4543 4547 }
4544 4548 return true;
4545 4549 }
4546 4550
4547 4551 void os::pause() {
4548 4552 char filename[MAX_PATH];
4549 4553 if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4550 4554 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4551 4555 } else {
4552 4556 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4553 4557 }
4554 4558
4555 4559 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4556 4560 if (fd != -1) {
4557 4561 struct stat buf;
4558 4562 ::close(fd);
4559 4563 while (::stat(filename, &buf) == 0) {
4560 4564 Sleep(100);
4561 4565 }
4562 4566 } else {
4563 4567 jio_fprintf(stderr,
4564 4568 "Could not open pause file '%s', continuing immediately.\n", filename);
4565 4569 }
4566 4570 }
4567 4571
4568 4572 // An Event wraps a win32 "CreateEvent" kernel handle.
4569 4573 //
4570 4574 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
4571 4575 //
4572 4576 // 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle
4573 4577 // field, and call CloseHandle() on the win32 event handle. Unpark() would
4574 4578 // need to be modified to tolerate finding a NULL (invalid) win32 event handle.
4575 4579 // In addition, an unpark() operation might fetch the handle field, but the
4576 4580 // event could recycle between the fetch and the SetEvent() operation.
4577 4581 // SetEvent() would either fail because the handle was invalid, or inadvertently work,
4578 4582 // as the win32 handle value had been recycled. In an ideal world calling SetEvent()
4579 4583 // on a stale but recycled handle would be harmless, but in practice this might
4580 4584 // confuse other non-Sun code, so it's not a viable approach.
4581 4585 //
4582 4586 // 2: Once a win32 event handle is associated with an Event, it remains associated
4583 4587 // with the Event. The event handle is never closed. This could be construed
4584 4588 // as handle leakage, but only up to the maximum # of threads that have been extant
4585 4589 // at any one time. This shouldn't be an issue, as windows platforms typically
4586 4590 // permit a process to have hundreds of thousands of open handles.
4587 4591 //
4588 4592 // 3: Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
4589 4593 // and release unused handles.
4590 4594 //
4591 4595 // 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
4592 4596 // It's not clear, however, that we wouldn't be trading one type of leak for another.
4593 4597 //
4594 4598 // 5. Use an RCU-like mechanism (Read-Copy Update).
4595 4599 // Or perhaps something similar to Maged Michael's "Hazard pointers".
4596 4600 //
4597 4601 // We use (2).
4598 4602 //
4599 4603 // TODO-FIXME:
4600 4604 // 1. Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
4601 4605 // 2. Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
4602 4606 // to recover from (or at least detect) the dreaded Windows 841176 bug.
4603 4607 // 3. Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
4604 4608 // into a single win32 CreateEvent() handle.
4605 4609 //
4606 4610 // _Event transitions in park()
4607 4611 // -1 => -1 : illegal
4608 4612 // 1 => 0 : pass - return immediately
4609 4613 // 0 => -1 : block
4610 4614 //
4611 4615 // _Event serves as a restricted-range semaphore :
4612 4616 // -1 : thread is blocked
4613 4617 // 0 : neutral - thread is running or ready
4614 4618 // 1 : signaled - thread is running or ready
4615 4619 //
4616 4620 // Another possible encoding of _Event would be
4617 4621 // with explicit "PARKED" and "SIGNALED" bits.
4618 4622
// Block the calling thread for up to Millis milliseconds or until
// unpark()ed. Returns OS_OK when awoken by unpark(), OS_TIMEOUT when the
// timeout elapsed (see note at the bottom about simultaneous occurrence).
int os::PlatformEvent::park (jlong Millis) {
  guarantee (_ParkHandle != NULL , "Invariant") ;
  guarantee (Millis > 0 , "Invariant") ;
  int v ;

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.

  // Atomically decrement _Event: 1 -> 0 consumes a pending unpark(),
  // 0 -> -1 records that this thread is about to block.
  for (;;) {
    v = _Event ;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee ((v == 0) || (v == 1), "invariant") ;
  if (v != 0) return OS_OK ;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
  // versions of Windows. See EventWait() for details. This may be superstition. Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos(). Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval. Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time. This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000 ;
  DWORD rv = WAIT_TIMEOUT ;
  while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis ; // set prd = MAX (Millis, MAXTIMEOUT)
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT ;
    }
    rv = ::WaitForSingleObject (_ParkHandle, prd) ;
    assert (rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed") ;
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd ;
    }
  }
  v = _Event ;
  _Event = 0 ;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence() ;
  // If we encounter a nearly simultaneous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT ;
}
4672 4676
// Block indefinitely until unpark()ed.
void os::PlatformEvent::park () {
  guarantee (_ParkHandle != NULL, "Invariant") ;
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  int v ;
  // Atomically decrement _Event: 1 -> 0 consumes a pending unpark(),
  // 0 -> -1 marks this thread as blocked.
  for (;;) {
    v = _Event ;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee ((v == 0) || (v == 1), "invariant") ;
  if (v != 0) return ;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  // Re-check _Event after each wakeup to tolerate spurious returns from
  // WaitForSingleObject (see the comments in the timed park() above).
  while (_Event < 0) {
    DWORD rv = ::WaitForSingleObject (_ParkHandle, INFINITE) ;
    assert (rv == WAIT_OBJECT_0, "WaitForSingleObject failed") ;
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case multiple
  // unpark() operations drove _Event up to 1.
  _Event = 0 ;
  OrderAccess::fence() ;
  guarantee (_Event >= 0, "invariant") ;
}
4700 4704
// Wake a parked thread (or pre-signal a future park()).
void os::PlatformEvent::unpark() {
  guarantee (_ParkHandle != NULL, "Invariant") ;

  // Transitions for _Event:
  //    0 :=> 1
  //    1 :=> 1
  //   -1 :=> either 0 or 1; must signal target thread
  //          That is, we can safely transition _Event from -1 to either
  //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
  //          unpark() calls.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  // If the previous value was >= 0 no thread was blocked, so setting
  // _Event to 1 suffices and the kernel event need not be signaled.
  if (Atomic::xchg(1, &_Event) >= 0) return;

  // The previous value was -1: a thread is parked; wake it.
  ::SetEvent(_ParkHandle);
}
4723 4727
4724 4728
4725 4729 // JSR166
4726 4730 // -------------------------------------------------------
4727 4731
4728 4732 /*
4729 4733 * The Windows implementation of Park is very straightforward: Basic
4730 4734 * operations on Win32 Events turn out to have the right semantics to
4731 4735 * use them directly. We opportunistically reuse the event inherited
4732 4736 * from Monitor.
4733 4737 */
4734 4738
4735 4739
// JSR166-style park: block the current thread until unpark(), interrupt,
// or timeout. time < 0 means "don't wait"; time == 0 with !isAbsolute
// means wait indefinitely; with isAbsolute, 'time' is an absolute
// deadline in milliseconds; otherwise it is a relative timeout in
// nanoseconds.
void Parker::park(bool isAbsolute, jlong time) {
  guarantee (_ParkEvent != NULL, "invariant") ;
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  }
  else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  }
  else if (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) // already elapsed
      return;
  }
  else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0)   // Wait for the minimal time unit if zero
      time = 1;
  }

  JavaThread* thread = (JavaThread*)(Thread::current());
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Don't wait if interrupted or already triggered
  if (Thread::is_interrupted(thread, false) ||
    WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    // Consume the pending permit (event was set) before returning.
    ResetEvent(_ParkEvent);
    return;
  }
  else {
    // Enter a VM-blocked state for the duration of the wait so that
    // safepoints and the suspend protocol can make progress.
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    jt->set_suspend_equivalent();

    WaitForSingleObject(_ParkEvent, time);
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (jt->handle_special_suspend_equivalent_condition()) {
      jt->java_suspend_self();
    }
  }
}
4780 4784
// Make a permit available: wakes the parked thread, or causes the next
// park() to return immediately. Setting an already-set event is harmless,
// which gives unpark() its required idempotence.
void Parker::unpark() {
  guarantee (_ParkEvent != NULL, "invariant") ;
  SetEvent(_ParkEvent);
}
4785 4789
4786 4790 // Run the specified command in a separate process. Return its exit value,
4787 4791 // or -1 on failure (e.g. can't create a new process).
4788 4792 int os::fork_and_exec(char* cmd) {
4789 4793 STARTUPINFO si;
4790 4794 PROCESS_INFORMATION pi;
4791 4795
4792 4796 memset(&si, 0, sizeof(si));
4793 4797 si.cb = sizeof(si);
4794 4798 memset(&pi, 0, sizeof(pi));
4795 4799 BOOL rslt = CreateProcess(NULL, // executable name - use command line
4796 4800 cmd, // command line
4797 4801 NULL, // process security attribute
4798 4802 NULL, // thread security attribute
4799 4803 TRUE, // inherits system handles
4800 4804 0, // no creation flags
4801 4805 NULL, // use parent's environment block
4802 4806 NULL, // use parent's starting directory
4803 4807 &si, // (in) startup information
4804 4808 &pi); // (out) process information
4805 4809
4806 4810 if (rslt) {
4807 4811 // Wait until child process exits.
4808 4812 WaitForSingleObject(pi.hProcess, INFINITE);
4809 4813
4810 4814 DWORD exit_code;
4811 4815 GetExitCodeProcess(pi.hProcess, &exit_code);
4812 4816
4813 4817 // Close process and thread handles.
4814 4818 CloseHandle(pi.hProcess);
4815 4819 CloseHandle(pi.hThread);
4816 4820
4817 4821 return (int)exit_code;
4818 4822 } else {
4819 4823 return -1;
4820 4824 }
4821 4825 }
4822 4826
//--------------------------------------------------------------------------------------------------
// Non-product code

// Counters implementing the MallocVerifyStart / MallocVerifyInterval policy:
// verification begins after MallocVerifyStart checks and then runs once per
// MallocVerifyInterval checks (or always when 'force' is set).
static int mallocDebugIntervalCounter = 0;
static int mallocDebugCounter = 0;
// Walk the process C heap and validate every busy block, calling fatal() on
// the first corruption found. Always returns true when it returns at all.
bool os::check_heap(bool force) {
  if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
  if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
    // Note: HeapValidate executes two hardware breakpoints when it finds something
    // wrong; at these points, eax contains the address of the offending block (I think).
    // To get to the explicit error message(s) below, just continue twice.
    HANDLE heap = GetProcessHeap();
    // Hold the heap lock for the whole walk so the entry list stays stable.
    { HeapLock(heap);
      PROCESS_HEAP_ENTRY phe;
      phe.lpData = NULL;   // NULL lpData means "start at the first entry"
      while (HeapWalk(heap, &phe) != 0) {
        if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
            !HeapValidate(heap, 0, phe.lpData)) {
          tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
          tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData);
          fatal("corrupted C heap");
        }
      }
      // HeapWalk signals normal completion via ERROR_NO_MORE_ITEMS;
      // ERROR_CALL_NOT_IMPLEMENTED is tolerated on systems without support.
      DWORD err = GetLastError();
      if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) {
        fatal(err_msg("heap walk aborted with error %d", err));
      }
      HeapUnlock(heap);
    }
    mallocDebugIntervalCounter = 0;
  }
  return true;
}
4856 4860
4857 4861
4858 4862 bool os::find(address addr, outputStream* st) {
4859 4863 // Nothing yet
4860 4864 return false;
4861 4865 }
4862 4866
// Exception filter for faults on the memory-serialize page: an access
// violation on that page is an expected side effect of the serialization
// protocol, so execution is resumed; any other fault is passed on to the
// next handler.
LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
  DWORD exception_code = e->ExceptionRecord->ExceptionCode;

  if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) {
    JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow();
    PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
    // For access violations, ExceptionInformation[1] holds the faulting address.
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (os::is_memory_serialize_page(thread, addr))
      return EXCEPTION_CONTINUE_EXECUTION;
  }

  return EXCEPTION_CONTINUE_SEARCH;
}
4877 4881
// We don't build a headless jre for Windows, so this is always false.
bool os::is_headless_jre() { return false; }
4880 4884
4881 4885
// Thin macro wrappers over a Windows CRITICAL_SECTION, used below to guard
// one-time WinSock initialization.
typedef CRITICAL_SECTION mutex_t;
#define mutexInit(m) InitializeCriticalSection(m)
#define mutexDestroy(m) DeleteCriticalSection(m)
#define mutexLock(m) EnterCriticalSection(m)
#define mutexUnlock(m) LeaveCriticalSection(m)

static bool sock_initialized = FALSE;
static mutex_t sockFnTableMutex;

// Lazily initialize the WinSock 2 layer on first socket use. Errors are
// reported to stderr but not fatal.
static void initSock() {
  WSADATA wsadata;

  if (!os::WinSock2Dll::WinSock2Available()) {
    jio_fprintf(stderr, "Could not load Winsock 2 (error: %d)\n",
      ::GetLastError());
    return;
  }
  // NOTE(review): the initialized check happens only after the availability
  // probe above, and sockFnTableMutex is re-initialized on each first-time
  // caller — confirm callers cannot race here.
  if (sock_initialized == TRUE) return;

  ::mutexInit(&sockFnTableMutex);
  ::mutexLock(&sockFnTableMutex);
  if (os::WinSock2Dll::WSAStartup(MAKEWORD(1,1), &wsadata) != 0) {
    jio_fprintf(stderr, "Could not initialize Winsock\n");
  }
  // NOTE(review): sock_initialized is set TRUE even when WSAStartup failed,
  // so failure is never retried — confirm this best-effort behavior is intended.
  sock_initialized = TRUE;
  ::mutexUnlock(&sockFnTableMutex);
}
4909 4913
4910 4914 struct hostent* os::get_host_by_name(char* name) {
4911 4915 if (!sock_initialized) {
4912 4916 initSock();
4913 4917 }
4914 4918 if (!os::WinSock2Dll::WinSock2Available()) {
4915 4919 return NULL;
4916 4920 }
4917 4921 return (struct hostent*)os::WinSock2Dll::gethostbyname(name);
4918 4922 }
4919 4923
// Socket layer: thin pass-throughs to the WinSock BSD-style API so shared
// code can call the os:: entry points uniformly on every platform.

int os::socket_close(int fd) {
  return ::closesocket(fd);
}

// Returns 1 on success (with *pbytes set to the readable byte count),
// 0 when the ioctl failed.
int os::socket_available(int fd, jint *pbytes) {
  int ret = ::ioctlsocket(fd, FIONREAD, (u_long*)pbytes);
  return (ret < 0) ? 0 : 1;
}

int os::socket(int domain, int type, int protocol) {
  return ::socket(domain, type, protocol);
}

int os::listen(int fd, int count) {
  return ::listen(fd, count);
}

int os::connect(int fd, struct sockaddr* him, socklen_t len) {
  return ::connect(fd, him, len);
}

int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
  return ::accept(fd, him, len);
}

// Length parameters are narrowed to int because the WinSock calls take int.
int os::sendto(int fd, char* buf, size_t len, uint flags,
               struct sockaddr* to, socklen_t tolen) {

  return ::sendto(fd, buf, (int)len, flags, to, tolen);
}

int os::recvfrom(int fd, char *buf, size_t nBytes, uint flags,
                 sockaddr* from, socklen_t* fromlen) {

  return ::recvfrom(fd, buf, (int)nBytes, flags, from, fromlen);
}

int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  return ::recv(fd, buf, (int)nBytes, flags);
}

int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

// Windows has no separate "raw" send path; identical to os::send().
int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

// Wait until fd becomes readable or 'timeout' milliseconds elapse; returns
// the select() result (>0 readable, 0 timed out, SOCKET_ERROR on failure).
// The fd_set is filled in directly (fd_count/fd_array); the first select()
// argument is ignored by WinSock.
int os::timeout(int fd, long timeout) {
  fd_set tbl;
  struct timeval t;

  t.tv_sec = timeout / 1000;
  t.tv_usec = (timeout % 1000) * 1000;

  tbl.fd_count = 1;
  tbl.fd_array[0] = fd;

  return ::select(1, &tbl, 0, 0, &t);
}

int os::get_host_name(char* name, int namelen) {
  return ::gethostname(name, namelen);
}

int os::socket_shutdown(int fd, int howto) {
  return ::shutdown(fd, howto);
}

int os::bind(int fd, struct sockaddr* him, socklen_t len) {
  return ::bind(fd, him, len);
}

int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
  return ::getsockname(fd, him, len);
}

int os::get_sock_opt(int fd, int level, int optname,
                     char* optval, socklen_t* optlen) {
  return ::getsockopt(fd, level, optname, optval, optlen);
}

int os::set_sock_opt(int fd, int level, int optname,
                     const char* optval, socklen_t optlen) {
  return ::setsockopt(fd, level, optname, optval, optlen);
}
5007 5011
// WINDOWS CONTEXT flags for THREAD_SAMPLING: which register sets
// GetThreadContext should capture for the sampled thread.
#if defined(IA32)
# define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
#elif defined (AMD64)
# define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
#endif
5014 5018
5015 5019 // returns true if thread could be suspended,
5016 5020 // false otherwise
5017 5021 static bool do_suspend(HANDLE* h) {
5018 5022 if (h != NULL) {
5019 5023 if (SuspendThread(*h) != ~0) {
5020 5024 return true;
5021 5025 }
5022 5026 }
5023 5027 return false;
5024 5028 }
5025 5029
5026 5030 // resume the thread
5027 5031 // calling resume on an active thread is a no-op
5028 5032 static void do_resume(HANDLE* h) {
5029 5033 if (h != NULL) {
5030 5034 ResumeThread(*h);
5031 5035 }
5032 5036 }
5033 5037
5034 5038 // retrieve a suspend/resume context capable handle
5035 5039 // from the tid. Caller validates handle return value.
5036 5040 void get_thread_handle_for_extended_context(HANDLE* h, OSThread::thread_id_t tid) {
5037 5041 if (h != NULL) {
5038 5042 *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5039 5043 }
5040 5044 }
5041 5045
//
// Thread sampling implementation
//
// Suspend the target thread, capture its register context, run the sampling
// task on that context, then resume the thread and release the handle.
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    // NOTE(review): the GetThreadContext return value is not checked; on
    // failure ctxt is used partially uninitialized — confirm acceptable.
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}
5072 5076
5073 5077
// Kernel32 API
// Function-pointer types for Kernel32 entry points that may be absent on
// older Windows versions, resolved at runtime via GetProcAddress.
typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
typedef LPVOID (WINAPI *VirtualAllocExNuma_Fn) (HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
typedef BOOL (WINAPI *GetNumaHighestNodeNumber_Fn) (PULONG);
typedef BOOL (WINAPI *GetNumaNodeProcessorMask_Fn) (UCHAR, PULONGLONG);
typedef USHORT (WINAPI* RtlCaptureStackBackTrace_Fn)(ULONG, ULONG, PVOID*, PULONG);

// Resolved lazily in initializeCommon(); NULL means "not exported here".
GetLargePageMinimum_Fn os::Kernel32Dll::_GetLargePageMinimum = NULL;
VirtualAllocExNuma_Fn os::Kernel32Dll::_VirtualAllocExNuma = NULL;
GetNumaHighestNodeNumber_Fn os::Kernel32Dll::_GetNumaHighestNodeNumber = NULL;
GetNumaNodeProcessorMask_Fn os::Kernel32Dll::_GetNumaNodeProcessorMask = NULL;
RtlCaptureStackBackTrace_Fn os::Kernel32Dll::_RtlCaptureStackBackTrace = NULL;


BOOL os::Kernel32Dll::initialized = FALSE;
// Delegating wrappers: the *Available() probes lazily resolve the function
// pointers, the call wrappers assert resolution already happened.
SIZE_T os::Kernel32Dll::GetLargePageMinimum() {
  assert(initialized && _GetLargePageMinimum != NULL,
    "GetLargePageMinimumAvailable() not yet called");
  return _GetLargePageMinimum();
}

BOOL os::Kernel32Dll::GetLargePageMinimumAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetLargePageMinimum != NULL;
}

// All NUMA calls are treated as available iff VirtualAllocExNuma resolved.
BOOL os::Kernel32Dll::NumaCallsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _VirtualAllocExNuma != NULL;
}

LPVOID os::Kernel32Dll::VirtualAllocExNuma(HANDLE hProc, LPVOID addr, SIZE_T bytes, DWORD flags, DWORD prot, DWORD node) {
  assert(initialized && _VirtualAllocExNuma != NULL,
    "NUMACallsAvailable() not yet called");

  return _VirtualAllocExNuma(hProc, addr, bytes, flags, prot, node);
}

BOOL os::Kernel32Dll::GetNumaHighestNodeNumber(PULONG ptr_highest_node_number) {
  assert(initialized && _GetNumaHighestNodeNumber != NULL,
    "NUMACallsAvailable() not yet called");

  return _GetNumaHighestNodeNumber(ptr_highest_node_number);
}

BOOL os::Kernel32Dll::GetNumaNodeProcessorMask(UCHAR node, PULONGLONG proc_mask) {
  assert(initialized && _GetNumaNodeProcessorMask != NULL,
    "NUMACallsAvailable() not yet called");

  return _GetNumaNodeProcessorMask(node, proc_mask);
}
5129 5133
5130 5134 USHORT os::Kernel32Dll::RtlCaptureStackBackTrace(ULONG FrameToSkip,
5131 5135 ULONG FrameToCapture, PVOID* BackTrace, PULONG BackTraceHash) {
5132 5136 if (!initialized) {
5133 5137 initialize();
5134 5138 }
5135 5139
5136 5140 if (_RtlCaptureStackBackTrace != NULL) {
5137 5141 return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture,
5138 5142 BackTrace, BackTraceHash);
5139 5143 } else {
5140 5144 return 0;
5141 5145 }
5142 5146 }
5143 5147
// Resolve the Kernel32 entry points that must be looked up dynamically on
// every supported Windows version; entries missing from this OS stay NULL.
void os::Kernel32Dll::initializeCommon() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");
    _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
    _VirtualAllocExNuma = (VirtualAllocExNuma_Fn)::GetProcAddress(handle, "VirtualAllocExNuma");
    _GetNumaHighestNodeNumber = (GetNumaHighestNodeNumber_Fn)::GetProcAddress(handle, "GetNumaHighestNodeNumber");
    _GetNumaNodeProcessorMask = (GetNumaNodeProcessorMask_Fn)::GetProcAddress(handle, "GetNumaNodeProcessorMask");
    _RtlCaptureStackBackTrace = (RtlCaptureStackBackTrace_Fn)::GetProcAddress(handle, "RtlCaptureStackBackTrace");
    initialized = TRUE;
  }
}
5156 5160
5157 5161
5158 5162
#ifndef JDK6_OR_EARLIER

// JDK7+ build: these Kernel32 entry points are statically linked, so the
// availability probes are trivially true and only the common dynamic
// lookups need resolving.
void os::Kernel32Dll::initialize() {
  initializeCommon();
}


// Kernel32 API
inline BOOL os::Kernel32Dll::SwitchToThread() {
  return ::SwitchToThread();
}

inline BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  return true;
}

// Help tools
inline BOOL os::Kernel32Dll::HelpToolsAvailable() {
  return true;
}

inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
  return ::CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}

inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  return ::Module32First(hSnapshot, lpme);
}

inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  return ::Module32Next(hSnapshot, lpme);
}


inline BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
  return true;
}

inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
  ::GetNativeSystemInfo(lpSystemInfo);
}
5200 5204
// PSAPI API
// JDK7+ build: PSAPI is linked statically, so the wrappers delegate directly
// and availability is unconditionally true.
inline BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
  return ::EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
}

inline DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
  return ::GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
}

inline BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
  return ::GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
}

inline BOOL os::PSApiDll::PSApiAvailable() {
  return true;
}
5217 5221
5218 5222
// WinSock2 API
// JDK7+ build: ws2_32 is linked statically; wrappers delegate directly.
inline BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
  return ::WSAStartup(wVersionRequested, lpWSAData);
}

inline struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
  return ::gethostbyname(name);
}

inline BOOL os::WinSock2Dll::WinSock2Available() {
  return true;
}
5231 5235
// Advapi API
// JDK7+ build: advapi32 is linked statically; wrappers delegate directly.
inline BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
   BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
   PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
     return ::AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
       BufferLength, PreviousState, ReturnLength);
}

inline BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
   PHANDLE TokenHandle) {
     return ::OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
}

inline BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
  return ::LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
}

inline BOOL os::Advapi32Dll::AdvapiAvailable() {
  return true;
}
5252 5256
#else
// Kernel32 API
// JDK6-or-earlier build: these entry points may be absent at runtime, so
// they are resolved dynamically through the function-pointer types below.
typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD,DWORD);
typedef BOOL (WINAPI* Module32First_Fn)(HANDLE,LPMODULEENTRY32);
typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE,LPMODULEENTRY32);
typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO);

// Resolved in initialize(); NULL means the OS does not export the function.
SwitchToThread_Fn os::Kernel32Dll::_SwitchToThread = NULL;
CreateToolhelp32Snapshot_Fn os::Kernel32Dll::_CreateToolhelp32Snapshot = NULL;
Module32First_Fn os::Kernel32Dll::_Module32First = NULL;
Module32Next_Fn os::Kernel32Dll::_Module32Next = NULL;
GetNativeSystemInfo_Fn os::Kernel32Dll::_GetNativeSystemInfo = NULL;
5266 5270
// JDK6-or-earlier build: resolve the optional Kernel32 entry points as well
// as the common ones.
void os::Kernel32Dll::initialize() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");

    _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread");
    _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn)
      ::GetProcAddress(handle, "CreateToolhelp32Snapshot");
    _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First");
    _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next");
    _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo");
    initializeCommon();  // resolve the functions that always need resolving

    initialized = TRUE;
  }
}
5283 5287
// Call wrappers assert prior resolution; the *Available() probes resolve
// lazily and report whether this OS exports the entry point(s).
BOOL os::Kernel32Dll::SwitchToThread() {
  assert(initialized && _SwitchToThread != NULL,
    "SwitchToThreadAvailable() not yet called");
  return _SwitchToThread();
}


BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  if (!initialized) {
    initialize();
  }
  return _SwitchToThread != NULL;
}

// Help tools
BOOL os::Kernel32Dll::HelpToolsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _CreateToolhelp32Snapshot != NULL &&
         _Module32First != NULL &&
         _Module32Next != NULL;
}
5307 5311
// Toolhelp snapshot wrappers; callers must have checked HelpToolsAvailable().
HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
  assert(initialized && _CreateToolhelp32Snapshot != NULL,
    "HelpToolsAvailable() not yet called");

  return _CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}

BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  assert(initialized && _Module32First != NULL,
    "HelpToolsAvailable() not yet called");

  return _Module32First(hSnapshot, lpme);
}
5321 5325
5322 5326 inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
5323 5327 assert(initialized && _Module32Next != NULL,
5324 5328 "HelpToolsAvailable() not yet called");
5325 5329
5326 5330 return _Module32Next(hSnapshot, lpme);
5327 5331 }
5328 5332

// GetNativeSystemInfo is absent on pre-XP systems, hence the probe.
BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetNativeSystemInfo != NULL;
}

void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
  assert(initialized && _GetNativeSystemInfo != NULL,
    "GetNativeSystemInfoAvailable() not yet called");

  _GetNativeSystemInfo(lpSystemInfo);
}
5343 5347
5344 5348 // PSAPI API
5345 5349
5346 5350
5347 5351 typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD);
5348 5352 typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD);;
5349 5353 typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD);
5350 5354
5351 5355 EnumProcessModules_Fn os::PSApiDll::_EnumProcessModules = NULL;
5352 5356 GetModuleFileNameEx_Fn os::PSApiDll::_GetModuleFileNameEx = NULL;
5353 5357 GetModuleInformation_Fn os::PSApiDll::_GetModuleInformation = NULL;
5354 5358 BOOL os::PSApiDll::initialized = FALSE;
5355 5359
// Load PSAPI.DLL and resolve its entry points; on failure the pointers stay
// NULL and PSApiAvailable() reports false.
void os::PSApiDll::initialize() {
  if (!initialized) {
    HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0);
    if (handle != NULL) {
      _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle,
        "EnumProcessModules");
      _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle,
        "GetModuleFileNameExA");
      _GetModuleInformation = (GetModuleInformation_Fn)::GetProcAddress(handle,
        "GetModuleInformation");
    }
    initialized = TRUE;
  }
}
5370 5374
5371 5375
5372 5376
// Delegating PSAPI wrappers; callers must have checked PSApiAvailable().
BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
  assert(initialized && _EnumProcessModules != NULL,
    "PSApiAvailable() not yet called");
  return _EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
}

DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
  assert(initialized && _GetModuleFileNameEx != NULL,
    "PSApiAvailable() not yet called");
  return _GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
}

BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
  assert(initialized && _GetModuleInformation != NULL,
    "PSApiAvailable() not yet called");
  return _GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
}

// True only when every PSAPI entry point resolved.
BOOL os::PSApiDll::PSApiAvailable() {
  if (!initialized) {
    initialize();
  }
  return _EnumProcessModules != NULL &&
    _GetModuleFileNameEx != NULL &&
    _GetModuleInformation != NULL;
}
5399 5403
5400 5404
// WinSock2 API
// Function-pointer types for ws2_32.dll entry points resolved at runtime.
typedef int (PASCAL FAR* WSAStartup_Fn)(WORD, LPWSADATA);
// NOTE(review): gethostbyname_Fn is declared with a '...' parameter list
// rather than (const char*) — works with the PASCAL calling convention on
// this ABI, but confirm the variadic typedef is intentional.
typedef struct hostent *(PASCAL FAR *gethostbyname_Fn)(...);

WSAStartup_Fn os::WinSock2Dll::_WSAStartup = NULL;
gethostbyname_Fn os::WinSock2Dll::_gethostbyname = NULL;
BOOL os::WinSock2Dll::initialized = FALSE;
5408 5412
// Load ws2_32.dll and resolve WSAStartup/gethostbyname; on failure the
// pointers stay NULL and WinSock2Available() reports false.
void os::WinSock2Dll::initialize() {
  if (!initialized) {
    HMODULE handle = os::win32::load_Windows_dll("ws2_32.dll", NULL, 0);
    if (handle != NULL) {
      _WSAStartup = (WSAStartup_Fn)::GetProcAddress(handle, "WSAStartup");
      _gethostbyname = (gethostbyname_Fn)::GetProcAddress(handle, "gethostbyname");
    }
    initialized = TRUE;
  }
}
5419 5423
5420 5424
// Delegating WinSock2 wrappers; callers must have checked WinSock2Available().
BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
  assert(initialized && _WSAStartup != NULL,
    "WinSock2Available() not yet called");
  return _WSAStartup(wVersionRequested, lpWSAData);
}

struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
  assert(initialized && _gethostbyname != NULL,
    "WinSock2Available() not yet called");
  return _gethostbyname(name);
}

// True only when both ws2_32 entry points resolved.
BOOL os::WinSock2Dll::WinSock2Available() {
  if (!initialized) {
    initialize();
  }
  return _WSAStartup != NULL &&
    _gethostbyname != NULL;
}
5440 5444
// Function-pointer types for advapi32.dll entry points resolved at runtime.
typedef BOOL (WINAPI *AdjustTokenPrivileges_Fn)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD);
typedef BOOL (WINAPI *OpenProcessToken_Fn)(HANDLE, DWORD, PHANDLE);
typedef BOOL (WINAPI *LookupPrivilegeValue_Fn)(LPCTSTR, LPCTSTR, PLUID);

// Resolved in Advapi32Dll::initialize(); NULL when advapi32 is unavailable.
AdjustTokenPrivileges_Fn os::Advapi32Dll::_AdjustTokenPrivileges = NULL;
OpenProcessToken_Fn os::Advapi32Dll::_OpenProcessToken = NULL;
LookupPrivilegeValue_Fn os::Advapi32Dll::_LookupPrivilegeValue = NULL;
BOOL os::Advapi32Dll::initialized = FALSE;
5449 5453
// Load advapi32.dll and resolve the token/privilege entry points; on failure
// the pointers stay NULL and AdvapiAvailable() reports false.
void os::Advapi32Dll::initialize() {
  if (!initialized) {
    HMODULE handle = os::win32::load_Windows_dll("advapi32.dll", NULL, 0);
    if (handle != NULL) {
      _AdjustTokenPrivileges = (AdjustTokenPrivileges_Fn)::GetProcAddress(handle,
        "AdjustTokenPrivileges");
      _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle,
        "OpenProcessToken");
      _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle,
        "LookupPrivilegeValueA");
    }
    initialized = TRUE;
  }
}
5464 5468
// Delegating advapi32 wrappers; callers must have checked AdvapiAvailable().
BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
   BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
   PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
   assert(initialized && _AdjustTokenPrivileges != NULL,
     "AdvapiAvailable() not yet called");
   return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
     BufferLength, PreviousState, ReturnLength);
}

BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
   PHANDLE TokenHandle) {
   assert(initialized && _OpenProcessToken != NULL,
     "AdvapiAvailable() not yet called");
   return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
}

BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
   assert(initialized && _LookupPrivilegeValue != NULL,
     "AdvapiAvailable() not yet called");
   return _LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
}

// True only when every advapi32 entry point resolved.
BOOL os::Advapi32Dll::AdvapiAvailable() {
  if (!initialized) {
    initialize();
  }
  return _AdjustTokenPrivileges != NULL &&
    _OpenProcessToken != NULL &&
    _LookupPrivilegeValue != NULL;
}
5495 5499
5496 5500 #endif
↓ open down ↓ |
2513 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX