/*
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "jvm.h"
#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_x86.hpp"
#include "os_share_windows.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "symbolengine.hpp"
#include "unwind_windows_x86.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
#include "windbghelp.hpp"


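// The Windows CONTEXT structure names its register fields Rsp/Rbp/Rip on
// AMD64 and Esp/Ebp/Eip on 32-bit x86. The REG_* macros below let the
// architecture-independent code in this file refer to the stack pointer,
// frame pointer and program counter uniformly.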
#undef REG_SP
#undef REG_FP
#undef REG_PC
#ifdef AMD64
#define REG_SP Rsp
#define REG_FP Rbp
#define REG_PC Rip
#else
#define REG_SP Esp
#define REG_FP Ebp
#define REG_PC Eip
#endif // AMD64

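// The top-level SEH exception filter shared with the rest of the Windows
// port; defined in os_windows.cpp.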
extern LONG WINAPI topLevelExceptionFilter(_EXCEPTION_POINTERS* );

// Install a win32 structured exception handler around the thread.
void os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method, JavaCallArguments* args, Thread* thread) {
  __try {

#ifndef AMD64
    // We store the current thread in this wrapperthread location
    // and determine how far away this address is from the structured
    // exception pointer that FS:[0] points to.  This get_thread
    // code can then get the thread pointer via FS.
    //
    // Warning:  This routine must NEVER be inlined since we'd end up with
    //           multiple offsets.
    //
    volatile Thread* wrapperthread = thread;

    if (os::win32::get_thread_ptr_offset() == 0) {
      int thread_ptr_offset;
      __asm {
        lea eax, dword ptr wrapperthread;
        sub eax, dword ptr FS:[0H];
        mov thread_ptr_offset, eax
      };
      os::win32::set_thread_ptr_offset(thread_ptr_offset);
    }
#ifdef ASSERT
    // Verify that the offset hasn't changed since we initially captured
    // it. This might happen if we accidentally ended up with an
    // inlined version of this routine.
    else {
      int test_thread_ptr_offset;
      __asm {
        lea eax, dword ptr wrapperthread;
        sub eax, dword ptr FS:[0H];
        mov test_thread_ptr_offset, eax
      };
      assert(test_thread_ptr_offset == os::win32::get_thread_ptr_offset(),
             "thread pointer offset from SEH changed");
    }
#endif // ASSERT
#endif // !AMD64

    f(value, method, args, thread);
  } __except(topLevelExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) {
      // Nothing to do.
  }
}

#ifdef AMD64

// This is the language-specific handler for exceptions
// originating from dynamically generated code.
// We call the standard structured exception handler.
// We only expect Continued Execution since we cannot unwind
// from generated code.
LONG HandleExceptionFromCodeCache(
  IN PEXCEPTION_RECORD ExceptionRecord,
  IN ULONG64 EstablisherFrame,
  IN OUT PCONTEXT ContextRecord,
  IN OUT PDISPATCHER_CONTEXT DispatcherContext) {
  EXCEPTION_POINTERS ep;
  LONG result;

  ep.ExceptionRecord = ExceptionRecord;
  ep.ContextRecord = ContextRecord;

  result = topLevelExceptionFilter(&ep);

  // We had better only get a CONTINUE_EXECUTION from our handler,
  // since we don't have unwind information registered.

  guarantee(result == EXCEPTION_CONTINUE_EXECUTION,
            "Unexpected result from topLevelExceptionFilter");

  return(ExceptionContinueExecution);
}


// Structure containing the Windows Data Structures required
// to register our Code Cache exception handler.
// We put these in the CodeCache since the API requires that
// all addresses in these structures be relative to the code
// area registered with RtlAddFunctionTable.
typedef struct {
  char ExceptionHandlerInstr[16];  // jmp HandleExceptionFromCodeCache
  RUNTIME_FUNCTION rt;
  UNWIND_INFO_EH_ONLY unw;
} DynamicCodeData, *pDynamicCodeData;
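// The ExceptionHandlerInstr slot receives a jmp to HandleExceptionFromCodeCache
// (emitted in register_code_area() below); the UNWIND_INFO ExceptionHandler
// field holds its offset relative to the registered code area, which is how
// the OS exception dispatcher finds our handler for faults in generated code.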

#endif // AMD64
//
// Register our CodeCache area with the OS so it will dispatch exceptions
// to our topLevelExceptionFilter when we take an exception in our
// dynamically generated code.
//
// Arguments:  low and high are the addresses of the full reserved
// CodeCache area
//
bool os::register_code_area(char *low, char *high) {
#ifdef AMD64

  ResourceMark rm;

  pDynamicCodeData pDCD;
  PRUNTIME_FUNCTION prt;
  PUNWIND_INFO_EH_ONLY punwind;

  BufferBlob* blob = BufferBlob::create("CodeCache Exception Handler", sizeof(DynamicCodeData));
  CodeBuffer cb(blob);
  MacroAssembler* masm = new MacroAssembler(&cb);
  pDCD = (pDynamicCodeData) masm->pc();

  masm->jump(ExternalAddress((address)&HandleExceptionFromCodeCache));
  masm->flush();

  // Create an Unwind Structure specifying no unwind info
  // other than an Exception Handler
  punwind = &pDCD->unw;
  punwind->Version = 1;
  punwind->Flags = UNW_FLAG_EHANDLER;
  punwind->SizeOfProlog = 0;
  punwind->CountOfCodes = 0;
  punwind->FrameRegister = 0;
  punwind->FrameOffset = 0;
  punwind->ExceptionHandler = (char *)(&(pDCD->ExceptionHandlerInstr[0])) -
                              (char*)low;
  punwind->ExceptionData[0] = 0;

  // This structure describes the covered dynamic code area.
  // Addresses are relative to the beginning of the code cache area
  prt = &pDCD->rt;
  prt->BeginAddress = 0;
  prt->EndAddress = (ULONG)(high - low);
  prt->UnwindData = ((char *)punwind - low);

  guarantee(RtlAddFunctionTable(prt, 1, (ULONGLONG)low),
            "Failed to register Dynamic Code Exception Handler with RtlAddFunctionTable");

#endif // AMD64
  return true;
}

void os::initialize_thread(Thread* thr) {
  // Nothing to do.
}

// Atomics and Stub Functions

typedef int32_t   xchg_func_t            (int32_t,  volatile int32_t*);
typedef int64_t   xchg_long_func_t       (int64_t,  volatile int64_t*);
typedef int32_t   cmpxchg_func_t         (int32_t,  volatile int32_t*, int32_t);
typedef int8_t    cmpxchg_byte_func_t    (int8_t,   volatile int8_t*,  int8_t);
typedef int64_t   cmpxchg_long_func_t    (int64_t,  volatile int64_t*, int64_t);
typedef int32_t   add_func_t             (int32_t,  volatile int32_t*);
typedef int64_t   add_long_func_t        (int64_t,  volatile int64_t*);

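// Each atomic_*_bootstrap function below is the initial target of the
// corresponding os::atomic_*_func pointer. On the first call it installs the
// generated stub once StubRoutines has produced one; until then it falls back
// to a plain, non-atomic memory operation, which is safe only while the VM is
// still single-threaded (hence the Threads::number_of_threads() == 0 asserts).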
#ifdef AMD64

int32_t os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t* dest) {
  // try to use the stub:
  xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());

  if (func != NULL) {
    os::atomic_xchg_func = func;
    return (*func)(exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  int32_t old_value = *dest;
  *dest = exchange_value;
  return old_value;
}

int64_t os::atomic_xchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest) {
  // try to use the stub:
  xchg_long_func_t* func = CAST_TO_FN_PTR(xchg_long_func_t*, StubRoutines::atomic_xchg_long_entry());

  if (func != NULL) {
    os::atomic_xchg_long_func = func;
    return (*func)(exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  int64_t old_value = *dest;
  *dest = exchange_value;
  return old_value;
}


int32_t os::atomic_cmpxchg_bootstrap(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value) {
  // try to use the stub:
  cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  int32_t old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}

int8_t os::atomic_cmpxchg_byte_bootstrap(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value) {
  // try to use the stub:
  cmpxchg_byte_func_t* func = CAST_TO_FN_PTR(cmpxchg_byte_func_t*, StubRoutines::atomic_cmpxchg_byte_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_byte_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  int8_t old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}

#endif // AMD64

int64_t os::atomic_cmpxchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value) {
  // try to use the stub:
  cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_long_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  int64_t old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}

#ifdef AMD64

int32_t os::atomic_add_bootstrap(int32_t add_value, volatile int32_t* dest) {
  // try to use the stub:
  add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());

  if (func != NULL) {
    os::atomic_add_func = func;
    return (*func)(add_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  return (*dest) += add_value;
}

int64_t os::atomic_add_long_bootstrap(int64_t add_value, volatile int64_t* dest) {
  // try to use the stub:
  add_long_func_t* func = CAST_TO_FN_PTR(add_long_func_t*, StubRoutines::atomic_add_long_entry());

  if (func != NULL) {
    os::atomic_add_long_func = func;
    return (*func)(add_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  return (*dest) += add_value;
}

xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
xchg_long_func_t*    os::atomic_xchg_long_func    = os::atomic_xchg_long_bootstrap;
cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap;
add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
add_long_func_t*     os::atomic_add_long_func     = os::atomic_add_long_bootstrap;

#endif // AMD64

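// Unlike the other function pointers, atomic_cmpxchg_long is needed on
// 32-bit x86 as well: a 64-bit compare-and-exchange requires the cmpxchg8b
// instruction, which is reached through a stub, so this pointer lives
// outside the AMD64-only block above.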
cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;

#ifdef AMD64
/*
 * Windows/x64 does not use stack frames the way expected by Java:
 * [1] in most cases, there is no frame pointer. All locals are addressed via RSP
 * [2] in rare cases, when alloca() is used, a frame pointer is used, but this may
 *     not be RBP.
 * See http://msdn.microsoft.com/en-us/library/ew5tede7.aspx
 *
 * So it's not possible to print the native stack using the
 *     while (...) {...  fr = os::get_sender_for_C_frame(&fr); }
 * loop in vmError.cpp. We need to roll our own loop.
 */
bool os::platform_print_native_stack(outputStream* st, const void* context,
                                     char *buf, int buf_size)
{
  CONTEXT ctx;
  if (context != NULL) {
    memcpy(&ctx, context, sizeof(ctx));
  } else {
    RtlCaptureContext(&ctx);
  }

  st->print_cr("Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code)");

  STACKFRAME stk;
  memset(&stk, 0, sizeof(stk));
  stk.AddrStack.Offset    = ctx.Rsp;
  stk.AddrStack.Mode      = AddrModeFlat;
  stk.AddrFrame.Offset    = ctx.Rbp;
  stk.AddrFrame.Mode      = AddrModeFlat;
  stk.AddrPC.Offset       = ctx.Rip;
  stk.AddrPC.Mode         = AddrModeFlat;

  int count = 0;
  address lastpc = 0;
  while (count++ < StackPrintLimit) {
    intptr_t* sp = (intptr_t*)stk.AddrStack.Offset;
    intptr_t* fp = (intptr_t*)stk.AddrFrame.Offset; // NOT necessarily the same as ctx.Rbp!
    address pc = (address)stk.AddrPC.Offset;

    if (pc != NULL) {
      if (count == 2 && lastpc == pc) {
        // Skip it -- StackWalk64() may return the same PC
        // (but different SP) on the first try.
      } else {
        // Don't try to create a frame(sp, fp, pc) -- on WinX64, stk.AddrFrame
        // may not contain what Java expects, and may cause the frame() constructor
        // to crash. Let's just print out the symbolic address.
        frame::print_C_frame(st, buf, buf_size, pc);
        // print source file and line, if available
        char source_buf[128];
        int line_no;
        if (SymbolEngine::get_source_info(pc, source_buf, sizeof(source_buf), &line_no)) {
          st->print("  (%s:%d)", source_buf, line_no);
        }
        st->cr();
      }
      lastpc = pc;
    }

    PVOID p = WindowsDbgHelp::symFunctionTableAccess64(GetCurrentProcess(), stk.AddrPC.Offset);
    if (!p) {
      // StackWalk64() can't handle this PC. Calling StackWalk64 again may cause a crash.
      break;
    }

    BOOL result = WindowsDbgHelp::stackWalk64(
        IMAGE_FILE_MACHINE_AMD64,  // __in      DWORD MachineType,
        GetCurrentProcess(),       // __in      HANDLE hProcess,
        GetCurrentThread(),        // __in      HANDLE hThread,
        &stk,                      // __inout   LPSTACKFRAME64 StackFrame,
        &ctx);                     // __inout   PVOID ContextRecord,

    if (!result) {
      break;
    }
  }
  if (count > StackPrintLimit) {
    st->print_cr("...<more frames>...");
  }
  st->cr();

  return true;
}
#endif // AMD64

ExtendedPC os::fetch_frame_from_context(const void* ucVoid,
                    intptr_t** ret_sp, intptr_t** ret_fp) {

  ExtendedPC  epc;
  CONTEXT* uc = (CONTEXT*)ucVoid;

  if (uc != NULL) {
    epc = ExtendedPC((address)uc->REG_PC);
    if (ret_sp) *ret_sp = (intptr_t*)uc->REG_SP;
    if (ret_fp) *ret_fp = (intptr_t*)uc->REG_FP;
  } else {
    // construct empty ExtendedPC for return value checking
    epc = ExtendedPC(NULL);
    if (ret_sp) *ret_sp = (intptr_t *)NULL;
    if (ret_fp) *ret_fp = (intptr_t *)NULL;
  }

  return epc;
}

frame os::fetch_frame_from_context(const void* ucVoid) {
  intptr_t* sp;
  intptr_t* fp;
  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
  return frame(sp, fp, epc.pc());
}

// VC++ omits the frame pointer in optimized builds (this can be disabled
// with /Oy-). If we really want to walk C frames, we can use the
// StackWalk() API.
frame os::get_sender_for_C_frame(frame* fr) {
  return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
}

#ifndef AMD64
// Returns an estimate of the current stack pointer. The result must be
// guaranteed to point into the calling thread's stack, and be no lower than
// the current stack pointer.
address os::current_stack_pointer() {
  int dummy;
  address sp = (address)&dummy;
  return sp;
}
#else
// Returns the current stack pointer. An accurate value is needed for
// os::verify_stack_alignment().
address os::current_stack_pointer() {
  typedef address get_sp_func();
  get_sp_func* func = CAST_TO_FN_PTR(get_sp_func*,
                                     StubRoutines::x86::get_previous_sp_entry());
  return (*func)();
}
#endif


#ifndef AMD64
intptr_t* _get_previous_fp() {
  intptr_t **frameptr;
  __asm {
    mov frameptr, ebp
  };
  // ebp (frameptr) is the frame pointer of this frame (_get_previous_fp). We
  // want the ebp of the caller of os::current_frame*(), so go up two frames.
  // However, for optimized builds, _get_previous_fp() will be inlined, so
  // only go up 1 frame in that case.
#ifdef _NMT_NOINLINE_
  return **(intptr_t***)frameptr;
#else
  return *frameptr;
#endif
}
#endif // !AMD64

frame os::current_frame() {

#ifdef AMD64
  // MSVC does not support inline assembly on Windows/AMD64, so the frame
  // pointer is fetched via a generated stub.
  typedef intptr_t*      get_fp_func           ();
  get_fp_func* func = CAST_TO_FN_PTR(get_fp_func*,
                                     StubRoutines::x86::get_previous_fp_entry());
  if (func == NULL) return frame();
  intptr_t* fp = (*func)();
  if (fp == NULL) {
    return frame();
  }
#else
  intptr_t* fp = _get_previous_fp();
#endif // AMD64

  frame myframe((intptr_t*)os::current_stack_pointer(),
                (intptr_t*)fp,
                CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&myframe)) {
    // stack is not walkable
    return frame();
  } else {
    return os::get_sender_for_C_frame(&myframe);
  }
}

void os::print_context(outputStream *st, const void *context) {
  if (context == NULL) return;

  const CONTEXT* uc = (const CONTEXT*)context;

  st->print_cr("Registers:");
#ifdef AMD64
  st->print(  "RAX=" INTPTR_FORMAT, uc->Rax);
  st->print(", RBX=" INTPTR_FORMAT, uc->Rbx);
  st->print(", RCX=" INTPTR_FORMAT, uc->Rcx);
  st->print(", RDX=" INTPTR_FORMAT, uc->Rdx);
  st->cr();
  st->print(  "RSP=" INTPTR_FORMAT, uc->Rsp);
  st->print(", RBP=" INTPTR_FORMAT, uc->Rbp);
  st->print(", RSI=" INTPTR_FORMAT, uc->Rsi);
  st->print(", RDI=" INTPTR_FORMAT, uc->Rdi);
  st->cr();
  st->print(  "R8 =" INTPTR_FORMAT, uc->R8);
  st->print(", R9 =" INTPTR_FORMAT, uc->R9);
  st->print(", R10=" INTPTR_FORMAT, uc->R10);
  st->print(", R11=" INTPTR_FORMAT, uc->R11);
  st->cr();
  st->print(  "R12=" INTPTR_FORMAT, uc->R12);
  st->print(", R13=" INTPTR_FORMAT, uc->R13);
  st->print(", R14=" INTPTR_FORMAT, uc->R14);
  st->print(", R15=" INTPTR_FORMAT, uc->R15);
  st->cr();
  st->print(  "RIP=" INTPTR_FORMAT, uc->Rip);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->EFlags);
#else
  st->print(  "EAX=" INTPTR_FORMAT, uc->Eax);
  st->print(", EBX=" INTPTR_FORMAT, uc->Ebx);
  st->print(", ECX=" INTPTR_FORMAT, uc->Ecx);
  st->print(", EDX=" INTPTR_FORMAT, uc->Edx);
  st->cr();
  st->print(  "ESP=" INTPTR_FORMAT, uc->Esp);
  st->print(", EBP=" INTPTR_FORMAT, uc->Ebp);
  st->print(", ESI=" INTPTR_FORMAT, uc->Esi);
  st->print(", EDI=" INTPTR_FORMAT, uc->Edi);
  st->cr();
  st->print(  "EIP=" INTPTR_FORMAT, uc->Eip);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->EFlags);
#endif // AMD64
  st->cr();
  st->cr();

  intptr_t *sp = (intptr_t *)uc->REG_SP;
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
  print_hex_dump(st, (address)sp, (address)(sp + 32), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if entry point in an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = (address)uc->REG_PC;
  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
  print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
  st->cr();
}


void os::print_register_info(outputStream *st, const void *context) {
  if (context == NULL) return;

  const CONTEXT* uc = (const CONTEXT*)context;

  st->print_cr("Register to memory mapping:");
  st->cr();

  // this is only for the "general purpose" registers

#ifdef AMD64
  st->print("RIP="); print_location(st, uc->Rip);
  st->print("RAX="); print_location(st, uc->Rax);
  st->print("RBX="); print_location(st, uc->Rbx);
  st->print("RCX="); print_location(st, uc->Rcx);
  st->print("RDX="); print_location(st, uc->Rdx);
  st->print("RSP="); print_location(st, uc->Rsp);
  st->print("RBP="); print_location(st, uc->Rbp);
  st->print("RSI="); print_location(st, uc->Rsi);
  st->print("RDI="); print_location(st, uc->Rdi);
  st->print("R8 ="); print_location(st, uc->R8);
  st->print("R9 ="); print_location(st, uc->R9);
  st->print("R10="); print_location(st, uc->R10);
  st->print("R11="); print_location(st, uc->R11);
  st->print("R12="); print_location(st, uc->R12);
  st->print("R13="); print_location(st, uc->R13);
  st->print("R14="); print_location(st, uc->R14);
  st->print("R15="); print_location(st, uc->R15);
#else
  st->print("EIP="); print_location(st, uc->Eip);
  st->print("EAX="); print_location(st, uc->Eax);
  st->print("EBX="); print_location(st, uc->Ebx);
  st->print("ECX="); print_location(st, uc->Ecx);
  st->print("EDX="); print_location(st, uc->Edx);
  st->print("ESP="); print_location(st, uc->Esp);
  st->print("EBP="); print_location(st, uc->Ebp);
  st->print("ESI="); print_location(st, uc->Esi);
  st->print("EDI="); print_location(st, uc->Edi);
#endif

  st->cr();
}

extern "C" int SpinPause() {
#ifdef AMD64
  return 0;
#else
  // pause == rep:nop
  // On systems that don't support pause, a rep:nop
  // is executed as a plain nop.  The rep: prefix is ignored.
  _asm {
    pause;
  };
  return 1;
#endif // AMD64
}


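// Set up the floating-point unit: on 32-bit x86, load the VM's standard x87
// control word. On AMD64 this is a no-op, since Win64 floating point uses
// SSE rather than the x87 stack.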
void os::setup_fpu() {
#ifndef AMD64
  int fpu_cntrl_word = StubRoutines::fpu_cntrl_wrd_std();
  __asm fldcw fpu_cntrl_word;
#endif // !AMD64
}

#ifndef PRODUCT
void os::verify_stack_alignment() {
#ifdef AMD64
  // current_stack_pointer() calls the generated get_previous_sp stub routine.
  // Only enable the assert after that routine becomes available.
  if (StubRoutines::code1() != NULL) {
    assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
  }
#endif
}
#endif

int os::extra_bang_size_in_bytes() {
  // JDK-8050147 requires the full cache line bang for x86.
  return VM_Version::L1_line_size();
}