/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "assembler_x86.inline.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_windows.h"
#include "memory/allocation.inline.hpp"
#include "mutex_windows.inline.hpp"
#include "nativeInst_x86.hpp"
#include "os_share_windows.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timer.hpp"
#include "thread_windows.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

# include "unwind_windows_x86.hpp"
#undef REG_SP
#undef REG_FP
#undef REG_PC
#ifdef AMD64
#define REG_SP Rsp
#define REG_FP Rbp
#define REG_PC Rip
#else
#define REG_SP Esp
#define REG_FP Ebp
#define REG_PC Eip
#endif // AMD64

extern LONG WINAPI topLevelExceptionFilter(_EXCEPTION_POINTERS* );

// Install a win32 structured exception handler around thread.
void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
  __try {

#ifndef AMD64
    // We store the current thread in this wrapperthread location
    // and determine how far away this address is from the structured
    // exception pointer that FS:[0] points to.  This get_thread
    // code can then get the thread pointer via FS.
    //
    // Warning: This routine must NEVER be inlined since we'd end up with
    //          multiple offsets.
    //
    volatile Thread* wrapperthread = thread;
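    // Illustration only (the actual instruction sequence is emitted by the
    // assembler and may differ): once the offset below has been captured,
    // generated get_thread code can locate the current thread roughly as
    //
    //   mov eax, fs:[0]                      ; current SEH record
    //   mov eax, [eax + thread_ptr_offset]   ; wrapperthread slot -> Thread*
    //
    // which is why the distance from FS:[0] to this stack slot must be the
    // same on every invocation of this wrapper.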
    if ( ThreadLocalStorage::get_thread_ptr_offset() == 0 ) {
      int thread_ptr_offset;
      __asm {
        lea eax, dword ptr wrapperthread;
        sub eax, dword ptr FS:[0H];
        mov thread_ptr_offset, eax
      };
      ThreadLocalStorage::set_thread_ptr_offset(thread_ptr_offset);
    }
#ifdef ASSERT
    // Verify that the offset hasn't changed since we initially captured
    // it.  This might happen if we accidentally ended up with an
    // inlined version of this routine.
    else {
      int test_thread_ptr_offset;
      __asm {
        lea eax, dword ptr wrapperthread;
        sub eax, dword ptr FS:[0H];
        mov test_thread_ptr_offset, eax
      };
      assert(test_thread_ptr_offset == ThreadLocalStorage::get_thread_ptr_offset(),
             "thread pointer offset from SEH changed");
    }
#endif // ASSERT
#endif // !AMD64

    f(value, method, args, thread);
  } __except(topLevelExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }
}

#ifdef AMD64

// This is the language specific handler for exceptions
// originating from dynamically generated code.
// We call the standard structured exception handler.
// We only expect Continued Execution since we cannot unwind
// from generated code.
LONG HandleExceptionFromCodeCache(
  IN PEXCEPTION_RECORD ExceptionRecord,
  IN ULONG64 EstablisherFrame,
  IN OUT PCONTEXT ContextRecord,
  IN OUT PDISPATCHER_CONTEXT DispatcherContext) {
  EXCEPTION_POINTERS ep;
  LONG result;

  ep.ExceptionRecord = ExceptionRecord;
  ep.ContextRecord = ContextRecord;

  result = topLevelExceptionFilter(&ep);

  // We better only get a CONTINUE_EXECUTION from our handler
  // since we don't have unwind information registered.

  guarantee( result == EXCEPTION_CONTINUE_EXECUTION,
             "Unexpected result from topLevelExceptionFilter");

  return(ExceptionContinueExecution);
}


// Structure containing the Windows Data Structures required
// to register our Code Cache exception handler.
// We put these in the CodeCache since the API requires
// all addresses in these structures to be relative to the Code
// area registered with RtlAddFunctionTable.
typedef struct {
  char ExceptionHandlerInstr[16];  // jmp HandleExceptionFromCodeCache
  RUNTIME_FUNCTION rt;
  UNWIND_INFO_EH_ONLY unw;
} DynamicCodeData, *pDynamicCodeData;

#endif // AMD64
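
// Layout sketch of the blob that register_code_area() below places in the
// code cache (illustrative; field widths and padding come from the Windows
// headers):
//
//   low                                  (base passed to RtlAddFunctionTable)
//    ...
//    ExceptionHandlerInstr:      jmp HandleExceptionFromCodeCache
//    rt  (RUNTIME_FUNCTION):     BeginAddress = 0
//                                EndAddress   = high - low
//                                UnwindData   = &unw - low
//    unw (UNWIND_INFO_EH_ONLY):  ExceptionHandler = &ExceptionHandlerInstr - low
//    ...
//   high
//
// Every field is an offset from 'low', which is why the jmp thunk has to
// live inside the registered code area rather than in the VM image.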
//
// Register our CodeCache area with the OS so it will dispatch exceptions
// to our topLevelExceptionFilter when we take an exception in our
// dynamically generated code.
//
// Arguments:  low and high are the address of the full reserved
// codeCache area
//
bool os::register_code_area(char *low, char *high) {
#ifdef AMD64

  ResourceMark rm;

  pDynamicCodeData pDCD;
  PRUNTIME_FUNCTION prt;
  PUNWIND_INFO_EH_ONLY punwind;

  // If we are using Vectored Exceptions we don't need this registration
  if (UseVectoredExceptions) return true;

  BufferBlob* blob = BufferBlob::create("CodeCache Exception Handler", sizeof(DynamicCodeData));
  CodeBuffer cb(blob);
  MacroAssembler* masm = new MacroAssembler(&cb);
  pDCD = (pDynamicCodeData) masm->pc();

  masm->jump(ExternalAddress((address)&HandleExceptionFromCodeCache));
  masm->flush();

  // Create an Unwind Structure specifying no unwind info
  // other than an Exception Handler
  punwind = &pDCD->unw;
  punwind->Version = 1;
  punwind->Flags = UNW_FLAG_EHANDLER;
  punwind->SizeOfProlog = 0;
  punwind->CountOfCodes = 0;
  punwind->FrameRegister = 0;
  punwind->FrameOffset = 0;
  punwind->ExceptionHandler = (char *)(&(pDCD->ExceptionHandlerInstr[0])) -
                              (char*)low;
  punwind->ExceptionData[0] = 0;

  // This structure describes the covered dynamic code area.
  // Addresses are relative to the beginning of the code cache area
  prt = &pDCD->rt;
  prt->BeginAddress = 0;
  prt->EndAddress = (ULONG)(high - low);
  prt->UnwindData = ((char *)punwind - low);

  guarantee(RtlAddFunctionTable(prt, 1, (ULONGLONG)low),
            "Failed to register Dynamic Code Exception Handler with RtlAddFunctionTable");

#endif // AMD64
  return true;
}

void os::initialize_thread() {
  // Nothing to do.
}

// Atomics and Stub Functions

typedef jint      xchg_func_t          (jint,     volatile jint*);
typedef intptr_t  xchg_ptr_func_t      (intptr_t, volatile intptr_t*);
typedef jint      cmpxchg_func_t       (jint,     volatile jint*,  jint);
typedef jlong     cmpxchg_long_func_t  (jlong,    volatile jlong*, jlong);
typedef jint      add_func_t           (jint,     volatile jint*);
typedef intptr_t  add_ptr_func_t       (intptr_t, volatile intptr_t*);

#ifdef AMD64

jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
  // try to use the stub:
  xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());

  if (func != NULL) {
    os::atomic_xchg_func = func;
    return (*func)(exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jint old_value = *dest;
  *dest = exchange_value;
  return old_value;
}

intptr_t os::atomic_xchg_ptr_bootstrap(intptr_t exchange_value, volatile intptr_t* dest) {
  // try to use the stub:
  xchg_ptr_func_t* func = CAST_TO_FN_PTR(xchg_ptr_func_t*, StubRoutines::atomic_xchg_ptr_entry());

  if (func != NULL) {
    os::atomic_xchg_ptr_func = func;
    return (*func)(exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  intptr_t old_value = *dest;
  *dest = exchange_value;
  return old_value;
}
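
// The *_bootstrap functions in this file all follow the same pattern: the
// os::atomic_*_func pointers start out pointing at these bootstrap versions.
// Once the stub generator has produced the real entry point, the first call
// through the pointer patches it to the stub; the non-atomic fallback is only
// ever taken while the VM is still single-threaded, which is what the asserts
// check.
//
// Caller's-eye view (hedged sketch; the real dispatch lives in the platform
// atomic implementation, not here):
//
//   inline jint Atomic_xchg(jint x, volatile jint* dest) {
//     return (*os::atomic_xchg_func)(x, dest);   // bootstrap or stub
//   }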

jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) {
  // try to use the stub:
  cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jint old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}
#endif // AMD64

jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
  // try to use the stub:
  cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_long_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jlong old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}

#ifdef AMD64

jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
  // try to use the stub:
  add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());

  if (func != NULL) {
    os::atomic_add_func = func;
    return (*func)(add_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  return (*dest) += add_value;
}

intptr_t os::atomic_add_ptr_bootstrap(intptr_t add_value, volatile intptr_t* dest) {
  // try to use the stub:
  add_ptr_func_t* func = CAST_TO_FN_PTR(add_ptr_func_t*, StubRoutines::atomic_add_ptr_entry());

  if (func != NULL) {
    os::atomic_add_ptr_func = func;
    return (*func)(add_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  return (*dest) += add_value;
}

xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
xchg_ptr_func_t*     os::atomic_xchg_ptr_func     = os::atomic_xchg_ptr_bootstrap;
cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
add_ptr_func_t*      os::atomic_add_ptr_func      = os::atomic_add_ptr_bootstrap;

#endif // AMD64

cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;

ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                                        intptr_t** ret_sp, intptr_t** ret_fp) {

  ExtendedPC epc;
  CONTEXT* uc = (CONTEXT*)ucVoid;

  if (uc != NULL) {
    epc = ExtendedPC((address)uc->REG_PC);
    if (ret_sp) *ret_sp = (intptr_t*)uc->REG_SP;
    if (ret_fp) *ret_fp = (intptr_t*)uc->REG_FP;
  } else {
    // construct empty ExtendedPC for return value checking
    epc = ExtendedPC(NULL);
    if (ret_sp) *ret_sp = (intptr_t *)NULL;
    if (ret_fp) *ret_fp = (intptr_t *)NULL;
  }

  return epc;
}

frame os::fetch_frame_from_context(void* ucVoid) {
  intptr_t* sp;
  intptr_t* fp;
  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
  return frame(sp, fp, epc.pc());
}

// VC++ does not save the frame pointer on the stack in optimized builds.
// Frame-pointer omission can be turned off with /Oy-.  If we really want
// to walk C frames, we can use the StackWalk() API.
frame os::get_sender_for_C_frame(frame* fr) {
  return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
}
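
// Native frame layout assumed by get_sender_for_C_frame() above (sketch; it
// only holds when the compiler keeps the frame pointer, see the /Oy- note):
//
//      ...                    (higher addresses)
//      incoming arguments
//      return address       <- fr->sender_pc()
//      saved EBP/RBP        <- fr->fp(); the saved value is fr->link(),
//                              i.e. the caller's frame pointer
//      locals / spill slots
//      ...                  <- fr->sp()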

#ifndef AMD64
// Returns an estimate of the current stack pointer. Result must be guaranteed
// to point into the calling thread's stack, and be no lower than the current
// stack pointer.
address os::current_stack_pointer() {
  int dummy;
  address sp = (address)&dummy;
  return sp;
}
#else
// Returns the current stack pointer. Accurate value needed for
// os::verify_stack_alignment().
address os::current_stack_pointer() {
  typedef address get_sp_func();
  get_sp_func* func = CAST_TO_FN_PTR(get_sp_func*,
                                     StubRoutines::x86::get_previous_sp_entry());
  return (*func)();
}
#endif


#ifndef AMD64
intptr_t* _get_previous_fp() {
  intptr_t **frameptr;
  __asm {
    mov frameptr, ebp
  };
  return *frameptr;
}
#endif // !AMD64

frame os::current_frame() {

#ifdef AMD64
  // apparently _asm not supported on windows amd64
  typedef intptr_t* get_fp_func ();
  get_fp_func* func = CAST_TO_FN_PTR(get_fp_func*,
                                     StubRoutines::x86::get_previous_fp_entry());
  if (func == NULL) return frame(NULL, NULL, NULL);
  intptr_t* fp = (*func)();
#else
  intptr_t* fp = _get_previous_fp();
#endif // AMD64

  frame myframe((intptr_t*)os::current_stack_pointer(),
                (intptr_t*)fp,
                CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&myframe)) {
    // stack is not walkable
    return frame(NULL, NULL, NULL);
  } else {
    return os::get_sender_for_C_frame(&myframe);
  }
}
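
// Illustrative use of the primitives above (hedged sketch, not the VM's
// actual error-reporting loop):
//
//   frame f = os::current_frame();
//   while (f.pc() != NULL && !os::is_first_C_frame(&f)) {
//     // record f.pc(), then step to the caller
//     f = os::get_sender_for_C_frame(&f);
//   }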

void os::print_context(outputStream *st, void *context) {
  if (context == NULL) return;

  CONTEXT* uc = (CONTEXT*)context;

  st->print_cr("Registers:");
#ifdef AMD64
  st->print(  "RAX=" INTPTR_FORMAT, uc->Rax);
  st->print(", RBX=" INTPTR_FORMAT, uc->Rbx);
  st->print(", RCX=" INTPTR_FORMAT, uc->Rcx);
  st->print(", RDX=" INTPTR_FORMAT, uc->Rdx);
  st->cr();
  st->print(  "RSP=" INTPTR_FORMAT, uc->Rsp);
  st->print(", RBP=" INTPTR_FORMAT, uc->Rbp);
  st->print(", RSI=" INTPTR_FORMAT, uc->Rsi);
  st->print(", RDI=" INTPTR_FORMAT, uc->Rdi);
  st->cr();
  st->print(  "R8 =" INTPTR_FORMAT, uc->R8);
  st->print(", R9 =" INTPTR_FORMAT, uc->R9);
  st->print(", R10=" INTPTR_FORMAT, uc->R10);
  st->print(", R11=" INTPTR_FORMAT, uc->R11);
  st->cr();
  st->print(  "R12=" INTPTR_FORMAT, uc->R12);
  st->print(", R13=" INTPTR_FORMAT, uc->R13);
  st->print(", R14=" INTPTR_FORMAT, uc->R14);
  st->print(", R15=" INTPTR_FORMAT, uc->R15);
  st->cr();
  st->print(  "RIP=" INTPTR_FORMAT, uc->Rip);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->EFlags);
#else
  st->print(  "EAX=" INTPTR_FORMAT, uc->Eax);
  st->print(", EBX=" INTPTR_FORMAT, uc->Ebx);
  st->print(", ECX=" INTPTR_FORMAT, uc->Ecx);
  st->print(", EDX=" INTPTR_FORMAT, uc->Edx);
  st->cr();
  st->print(  "ESP=" INTPTR_FORMAT, uc->Esp);
  st->print(", EBP=" INTPTR_FORMAT, uc->Ebp);
  st->print(", ESI=" INTPTR_FORMAT, uc->Esi);
  st->print(", EDI=" INTPTR_FORMAT, uc->Edi);
  st->cr();
  st->print(  "EIP=" INTPTR_FORMAT, uc->Eip);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->EFlags);
#endif // AMD64
  st->cr();
  st->cr();

  intptr_t *sp = (intptr_t *)uc->REG_SP;
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
  print_hex_dump(st, (address)sp, (address)(sp + 32), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if entry point in an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = (address)uc->REG_PC;
  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
  print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
  st->cr();
}


void os::print_register_info(outputStream *st, void *context) {
  if (context == NULL) return;

  CONTEXT* uc = (CONTEXT*)context;

  st->print_cr("Register to memory mapping:");
  st->cr();

  // this is only for the "general purpose" registers

#ifdef AMD64
  st->print("RAX="); print_location(st, uc->Rax);
  st->print("RBX="); print_location(st, uc->Rbx);
  st->print("RCX="); print_location(st, uc->Rcx);
  st->print("RDX="); print_location(st, uc->Rdx);
  st->print("RSP="); print_location(st, uc->Rsp);
  st->print("RBP="); print_location(st, uc->Rbp);
  st->print("RSI="); print_location(st, uc->Rsi);
  st->print("RDI="); print_location(st, uc->Rdi);
  st->print("R8 ="); print_location(st, uc->R8);
  st->print("R9 ="); print_location(st, uc->R9);
  st->print("R10="); print_location(st, uc->R10);
  st->print("R11="); print_location(st, uc->R11);
  st->print("R12="); print_location(st, uc->R12);
  st->print("R13="); print_location(st, uc->R13);
  st->print("R14="); print_location(st, uc->R14);
  st->print("R15="); print_location(st, uc->R15);
#else
  st->print("EAX="); print_location(st, uc->Eax);
  st->print("EBX="); print_location(st, uc->Ebx);
  st->print("ECX="); print_location(st, uc->Ecx);
  st->print("EDX="); print_location(st, uc->Edx);
  st->print("ESP="); print_location(st, uc->Esp);
  st->print("EBP="); print_location(st, uc->Ebp);
  st->print("ESI="); print_location(st, uc->Esi);
  st->print("EDI="); print_location(st, uc->Edi);
#endif

  st->cr();
}

extern "C" int SafeFetch32 (int * adr, int Err) {
  int rv = Err ;
  _try {
    rv = *((volatile int *) adr) ;
  } __except(EXCEPTION_EXECUTE_HANDLER) {
  }
  return rv ;
}

extern "C" intptr_t SafeFetchN (intptr_t * adr, intptr_t Err) {
  intptr_t rv = Err ;
  _try {
    rv = *((volatile intptr_t *) adr) ;
  } __except(EXCEPTION_EXECUTE_HANDLER) {
  }
  return rv ;
}

extern "C" int SpinPause () {
#ifdef AMD64
  return 0 ;
#else
  // pause == rep:nop
  // On systems that don't support pause a rep:nop
  // is executed as a nop.  The rep: prefix is ignored.
  _asm {
    pause ;
  };
  return 1 ;
#endif // AMD64
}


void os::setup_fpu() {
#ifndef AMD64
  int fpu_cntrl_word = StubRoutines::fpu_cntrl_wrd_std();
  __asm fldcw fpu_cntrl_word;
#endif // !AMD64
}

#ifndef PRODUCT
void os::verify_stack_alignment() {
#ifdef AMD64
  assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
#endif
}
#endif
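
// Usage sketch for SafeFetch32/SafeFetchN above (illustrative): probe a
// possibly-unmapped address without crashing the VM, e.g.
//
//   int v = SafeFetch32((int*)addr, 0xBAD);
//   // v == 0xBAD means addr was unreadable (or genuinely contained 0xBAD)
//
// The SEH filter swallows the access violation and the sentinel value is
// returned instead.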