/*
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "jvm.h"
#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_x86.hpp"
#include "os_share_windows.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "symbolengine.hpp"
#include "unwind_windows_x86.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
#include "windbghelp.hpp"


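// REG_SP, REG_FP and REG_PC name the stack pointer, frame pointer and
// instruction pointer fields of the Windows CONTEXT record for the
// current architecture.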
#undef REG_SP
#undef REG_FP
#undef REG_PC
#ifdef AMD64
#define REG_SP Rsp
#define REG_FP Rbp
#define REG_PC Rip
#else
#define REG_SP Esp
#define REG_FP Ebp
#define REG_PC Eip
#endif // AMD64

extern LONG WINAPI topLevelExceptionFilter(_EXCEPTION_POINTERS*);

// Install a win32 structured exception handler around the Java call.
void os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method, JavaCallArguments* args, Thread* thread) {
  __try {

#ifndef AMD64
    // We store the current thread in this wrapperthread location
    // and determine how far away this address is from the structured
    // exception pointer that FS:[0] points to.  This get_thread
    // code can then get the thread pointer via FS.
    //
    // Warning:  This routine must NEVER be inlined since we'd end up with
    //           multiple offsets.
    //
    volatile Thread* wrapperthread = thread;

    if (os::win32::get_thread_ptr_offset() == 0) {
      int thread_ptr_offset;
      __asm {
        lea eax, dword ptr wrapperthread;
        sub eax, dword ptr FS:[0H];
        mov thread_ptr_offset, eax
      };
      os::win32::set_thread_ptr_offset(thread_ptr_offset);
    }
#ifdef ASSERT
    // Verify that the offset hasn't changed since we initially captured
    // it. This might happen if we accidentally ended up with an
    // inlined version of this routine.
    else {
      int test_thread_ptr_offset;
      __asm {
        lea eax, dword ptr wrapperthread;
        sub eax, dword ptr FS:[0H];
        mov test_thread_ptr_offset, eax
      };
      assert(test_thread_ptr_offset == os::win32::get_thread_ptr_offset(),
             "thread pointer offset from SEH changed");
    }
#endif // ASSERT
#endif // !AMD64

    f(value, method, args, thread);
  } __except(topLevelExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) {
      // Nothing to do.
  }
}

#ifdef AMD64

// This is the language-specific handler for exceptions
// originating from dynamically generated code.
// We call the standard structured exception filter.
// We expect only continued execution since we cannot unwind
// from generated code.
LONG HandleExceptionFromCodeCache(
  IN PEXCEPTION_RECORD ExceptionRecord,
  IN ULONG64 EstablisherFrame,
  IN OUT PCONTEXT ContextRecord,
  IN OUT PDISPATCHER_CONTEXT DispatcherContext) {
  EXCEPTION_POINTERS ep;
  LONG result;

  ep.ExceptionRecord = ExceptionRecord;
  ep.ContextRecord = ContextRecord;

  result = topLevelExceptionFilter(&ep);

  // We expect only EXCEPTION_CONTINUE_EXECUTION from our filter
  // since we have no unwind information registered.

  guarantee(result == EXCEPTION_CONTINUE_EXECUTION,
            "Unexpected result from topLevelExceptionFilter");

  return ExceptionContinueExecution;
}


// Structure containing the Windows data structures required
// to register our code cache exception handler.
// We put these in the code cache since the API requires that
// all addresses in these structures be relative to the code
// area registered with RtlAddFunctionTable.
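//
// Layout (see os::register_code_area below):
//   ExceptionHandlerInstr -- a jmp to HandleExceptionFromCodeCache
//   rt                    -- RUNTIME_FUNCTION covering the code cache area
//   unw                   -- UNWIND_INFO carrying only an exception handler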
typedef struct {
  char ExceptionHandlerInstr[16];  // jmp HandleExceptionFromCodeCache
  RUNTIME_FUNCTION rt;
  UNWIND_INFO_EH_ONLY unw;
} DynamicCodeData, *pDynamicCodeData;

#endif // AMD64
//
// Register our CodeCache area with the OS so it will dispatch exceptions
// to our topLevelExceptionFilter when we take an exception in our
// dynamically generated code.
//
// Arguments:  low and high are the addresses of the full reserved
// CodeCache area
//
bool os::register_code_area(char *low, char *high) {
#ifdef AMD64

  ResourceMark rm;

  pDynamicCodeData pDCD;
  PRUNTIME_FUNCTION prt;
  PUNWIND_INFO_EH_ONLY punwind;

  BufferBlob* blob = BufferBlob::create("CodeCache Exception Handler", sizeof(DynamicCodeData));
  CodeBuffer cb(blob);
  MacroAssembler* masm = new MacroAssembler(&cb);
  pDCD = (pDynamicCodeData) masm->pc();

  masm->jump(ExternalAddress((address)&HandleExceptionFromCodeCache));
  masm->flush();

  // Create an unwind structure specifying no unwind info
  // other than an exception handler.
  punwind = &pDCD->unw;
  punwind->Version = 1;
  punwind->Flags = UNW_FLAG_EHANDLER;
  punwind->SizeOfProlog = 0;
  punwind->CountOfCodes = 0;
  punwind->FrameRegister = 0;
  punwind->FrameOffset = 0;
  punwind->ExceptionHandler = (char *)(&(pDCD->ExceptionHandlerInstr[0])) -
                              (char*)low;
  punwind->ExceptionData[0] = 0;

  // This structure describes the covered dynamic code area.
  // Addresses are relative to the beginning of the code cache area.
  prt = &pDCD->rt;
  prt->BeginAddress = 0;
  prt->EndAddress = (ULONG)(high - low);
  prt->UnwindData = ((char *)punwind - low);

  guarantee(RtlAddFunctionTable(prt, 1, (ULONGLONG)low),
            "Failed to register Dynamic Code Exception Handler with RtlAddFunctionTable");

#endif // AMD64
  return true;
}

// Atomics and Stub Functions
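//
// Until the atomic stub routines are generated during VM startup, the
// os::atomic_*_func pointers below refer to the *_bootstrap fallbacks.
// Each fallback installs the stub as soon as it becomes available and
// delegates to it; until then it performs a plain (non-atomic) memory
// operation, which is safe only while the VM is still single-threaded --
// hence the "for bootstrap only" asserts.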

typedef int32_t   xchg_func_t            (int32_t,  volatile int32_t*);
typedef int64_t   xchg_long_func_t       (int64_t,  volatile int64_t*);
typedef int32_t   cmpxchg_func_t         (int32_t,  volatile int32_t*, int32_t);
typedef int8_t    cmpxchg_byte_func_t    (int8_t,   volatile int8_t*,  int8_t);
typedef int64_t   cmpxchg_long_func_t    (int64_t,  volatile int64_t*, int64_t);
typedef int32_t   add_func_t             (int32_t,  volatile int32_t*);
typedef int64_t   add_long_func_t        (int64_t,  volatile int64_t*);

#ifdef AMD64

int32_t os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t* dest) {
  // try to use the stub:
  xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());

  if (func != NULL) {
    os::atomic_xchg_func = func;
    return (*func)(exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  int32_t old_value = *dest;
  *dest = exchange_value;
  return old_value;
}

int64_t os::atomic_xchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest) {
  // try to use the stub:
  xchg_long_func_t* func = CAST_TO_FN_PTR(xchg_long_func_t*, StubRoutines::atomic_xchg_long_entry());

  if (func != NULL) {
    os::atomic_xchg_long_func = func;
    return (*func)(exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  int64_t old_value = *dest;
  *dest = exchange_value;
  return old_value;
}


int32_t os::atomic_cmpxchg_bootstrap(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value) {
  // try to use the stub:
  cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  int32_t old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}

int8_t os::atomic_cmpxchg_byte_bootstrap(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value) {
  // try to use the stub:
  cmpxchg_byte_func_t* func = CAST_TO_FN_PTR(cmpxchg_byte_func_t*, StubRoutines::atomic_cmpxchg_byte_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_byte_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  int8_t old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}

#endif // AMD64

int64_t os::atomic_cmpxchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value) {
  // try to use the stub:
  cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_long_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  int64_t old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}

#ifdef AMD64

int32_t os::atomic_add_bootstrap(int32_t add_value, volatile int32_t* dest) {
  // try to use the stub:
  add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());

  if (func != NULL) {
    os::atomic_add_func = func;
    return (*func)(add_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  return (*dest) += add_value;
}

int64_t os::atomic_add_long_bootstrap(int64_t add_value, volatile int64_t* dest) {
  // try to use the stub:
  add_long_func_t* func = CAST_TO_FN_PTR(add_long_func_t*, StubRoutines::atomic_add_long_entry());

  if (func != NULL) {
    os::atomic_add_long_func = func;
    return (*func)(add_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  return (*dest) += add_value;
}

xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
xchg_long_func_t*    os::atomic_xchg_long_func    = os::atomic_xchg_long_bootstrap;
cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap;
add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
add_long_func_t*     os::atomic_add_long_func     = os::atomic_add_long_bootstrap;

#endif // AMD64

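// atomic_cmpxchg_long is dispatched through a function pointer on 32-bit
// x86 as well, so its pointer lives outside the AMD64-only block above.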
cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;

#ifdef AMD64
/*
 * Windows/x64 does not use stack frames the way expected by Java:
 * [1] in most cases, there is no frame pointer. All locals are addressed via RSP
 * [2] in rare cases, when alloca() is used, a frame pointer is used, but this may
 *     not be RBP.
 * See http://msdn.microsoft.com/en-us/library/ew5tede7.aspx
 *
 * So it's not possible to print the native stack using the
 *     while (...) {...  fr = os::get_sender_for_C_frame(&fr); }
 * loop in vmError.cpp. We need to roll our own loop.
 */
bool os::platform_print_native_stack(outputStream* st, const void* context,
                                     char *buf, int buf_size)
{
  CONTEXT ctx;
  if (context != NULL) {
    memcpy(&ctx, context, sizeof(ctx));
  } else {
    RtlCaptureContext(&ctx);
  }

  st->print_cr("Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code)");

  STACKFRAME stk;
  memset(&stk, 0, sizeof(stk));
  stk.AddrStack.Offset    = ctx.Rsp;
  stk.AddrStack.Mode      = AddrModeFlat;
  stk.AddrFrame.Offset    = ctx.Rbp;
  stk.AddrFrame.Mode      = AddrModeFlat;
  stk.AddrPC.Offset       = ctx.Rip;
  stk.AddrPC.Mode         = AddrModeFlat;

  int count = 0;
  address lastpc = 0;
  while (count++ < StackPrintLimit) {
    intptr_t* sp = (intptr_t*)stk.AddrStack.Offset;
    intptr_t* fp = (intptr_t*)stk.AddrFrame.Offset; // NOT necessarily the same as ctx.Rbp!
    address pc = (address)stk.AddrPC.Offset;

    if (pc != NULL) {
      if (count == 2 && lastpc == pc) {
        // Skip it -- StackWalk64() may return the same PC
        // (but different SP) on the first try.
      } else {
        // Don't try to create a frame(sp, fp, pc) -- on WinX64, stk.AddrFrame
        // may not contain what Java expects, and may cause the frame() constructor
        // to crash. Let's just print out the symbolic address.
        frame::print_C_frame(st, buf, buf_size, pc);
        // print source file and line, if available
        // (use a separate buffer so we don't shadow the buf parameter)
        char source_file_buf[128];
        int line_no;
        if (SymbolEngine::get_source_info(pc, source_file_buf, sizeof(source_file_buf), &line_no)) {
          st->print("  (%s:%d)", source_file_buf, line_no);
        }
        st->cr();
      }
      lastpc = pc;
    }

    PVOID p = WindowsDbgHelp::symFunctionTableAccess64(GetCurrentProcess(), stk.AddrPC.Offset);
    if (!p) {
      // StackWalk64() can't handle this PC. Calling StackWalk64 again may cause a crash.
      break;
    }

    BOOL result = WindowsDbgHelp::stackWalk64(
        IMAGE_FILE_MACHINE_AMD64,  // __in      DWORD MachineType,
        GetCurrentProcess(),       // __in      HANDLE hProcess,
        GetCurrentThread(),        // __in      HANDLE hThread,
        &stk,                      // __inout   LPSTACKFRAME64 StackFrame,
        &ctx);                     // __inout   PVOID ContextRecord,

    if (!result) {
      break;
    }
  }
  if (count > StackPrintLimit) {
    st->print_cr("...<more frames>...");
  }
  st->cr();

  return true;
}
#endif // AMD64

ExtendedPC os::fetch_frame_from_context(const void* ucVoid,
                    intptr_t** ret_sp, intptr_t** ret_fp) {

  ExtendedPC  epc;
  CONTEXT* uc = (CONTEXT*)ucVoid;

  if (uc != NULL) {
    epc = ExtendedPC((address)uc->REG_PC);
    if (ret_sp) *ret_sp = (intptr_t*)uc->REG_SP;
    if (ret_fp) *ret_fp = (intptr_t*)uc->REG_FP;
  } else {
    // construct empty ExtendedPC for return value checking
    epc = ExtendedPC(NULL);
    if (ret_sp) *ret_sp = (intptr_t *)NULL;
    if (ret_fp) *ret_fp = (intptr_t *)NULL;
  }

  return epc;
}

frame os::fetch_frame_from_context(const void* ucVoid) {
  intptr_t* sp;
  intptr_t* fp;
  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
  return frame(sp, fp, epc.pc());
}

// VC++ does not save the frame pointer on the stack in optimized builds.
// Frame-pointer omission can be disabled with /Oy-. If we really want to
// walk C frames, we can use the StackWalk() API.
frame os::get_sender_for_C_frame(frame* fr) {
  return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
}

#ifndef AMD64
// Ignore "C4172: returning address of local variable or temporary" on 32-bit
PRAGMA_DIAG_PUSH
PRAGMA_DISABLE_MSVC_WARNING(4172)
// Returns an estimate of the current stack pointer. The result must point
// into the calling thread's stack and be no lower than the actual stack
// pointer.
address os::current_stack_pointer() {
  int dummy;
  address sp = (address)&dummy;
  return sp;
}
PRAGMA_DIAG_POP
#else
// Returns the current stack pointer. Accurate value needed for
// os::verify_stack_alignment().
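// MSVC does not support inline assembly on x64, so an assembly stub is
// used to read RSP.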
address os::current_stack_pointer() {
  typedef address get_sp_func();
  get_sp_func* func = CAST_TO_FN_PTR(get_sp_func*,
                                     StubRoutines::x86::get_previous_sp_entry());
  return (*func)();
}
#endif


#ifndef AMD64
intptr_t* _get_previous_fp() {
  intptr_t **frameptr;
  __asm {
    mov frameptr, ebp
  };
  // ebp (frameptr) is for this frame (_get_previous_fp). We want the ebp for the
  // caller of os::current_frame*(), so go up two frames. However, for
  // optimized builds, _get_previous_fp() will be inlined, so only go
  // up 1 frame in that case.
#ifdef _NMT_NOINLINE_
  return **(intptr_t***)frameptr;
#else
  return *frameptr;
#endif
}
#endif // !AMD64

frame os::current_frame() {

#ifdef AMD64
  // MSVC does not support __asm on Windows x64, so use a stub routine.
  typedef intptr_t*      get_fp_func           ();
  get_fp_func* func = CAST_TO_FN_PTR(get_fp_func*,
                                     StubRoutines::x86::get_previous_fp_entry());
  if (func == NULL) return frame();
  intptr_t* fp = (*func)();
  if (fp == NULL) {
    return frame();
  }
#else
  intptr_t* fp = _get_previous_fp();
#endif // AMD64

  frame myframe((intptr_t*)os::current_stack_pointer(),
                (intptr_t*)fp,
                CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&myframe)) {
    // stack is not walkable
    return frame();
  } else {
    return os::get_sender_for_C_frame(&myframe);
  }
}

void os::print_context(outputStream *st, const void *context) {
  if (context == NULL) return;

  const CONTEXT* uc = (const CONTEXT*)context;

  st->print_cr("Registers:");
#ifdef AMD64
  st->print(  "RAX=" INTPTR_FORMAT, uc->Rax);
  st->print(", RBX=" INTPTR_FORMAT, uc->Rbx);
  st->print(", RCX=" INTPTR_FORMAT, uc->Rcx);
  st->print(", RDX=" INTPTR_FORMAT, uc->Rdx);
  st->cr();
  st->print(  "RSP=" INTPTR_FORMAT, uc->Rsp);
  st->print(", RBP=" INTPTR_FORMAT, uc->Rbp);
  st->print(", RSI=" INTPTR_FORMAT, uc->Rsi);
  st->print(", RDI=" INTPTR_FORMAT, uc->Rdi);
  st->cr();
  st->print(  "R8 =" INTPTR_FORMAT, uc->R8);
  st->print(", R9 =" INTPTR_FORMAT, uc->R9);
  st->print(", R10=" INTPTR_FORMAT, uc->R10);
  st->print(", R11=" INTPTR_FORMAT, uc->R11);
  st->cr();
  st->print(  "R12=" INTPTR_FORMAT, uc->R12);
  st->print(", R13=" INTPTR_FORMAT, uc->R13);
  st->print(", R14=" INTPTR_FORMAT, uc->R14);
  st->print(", R15=" INTPTR_FORMAT, uc->R15);
  st->cr();
  st->print(  "RIP=" INTPTR_FORMAT, uc->Rip);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->EFlags);
#else
  st->print(  "EAX=" INTPTR_FORMAT, uc->Eax);
  st->print(", EBX=" INTPTR_FORMAT, uc->Ebx);
  st->print(", ECX=" INTPTR_FORMAT, uc->Ecx);
  st->print(", EDX=" INTPTR_FORMAT, uc->Edx);
  st->cr();
  st->print(  "ESP=" INTPTR_FORMAT, uc->Esp);
  st->print(", EBP=" INTPTR_FORMAT, uc->Ebp);
  st->print(", ESI=" INTPTR_FORMAT, uc->Esi);
  st->print(", EDI=" INTPTR_FORMAT, uc->Edi);
  st->cr();
  st->print(  "EIP=" INTPTR_FORMAT, uc->Eip);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->EFlags);
#endif // AMD64
  st->cr();
  st->cr();

  intptr_t *sp = (intptr_t *)uc->REG_SP;
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
  print_hex_dump(st, (address)sp, (address)(sp + 32), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if the entry point of an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = (address)uc->REG_PC;
  print_instructions(st, pc, sizeof(char));
  st->cr();
}


void os::print_register_info(outputStream *st, const void *context) {
  if (context == NULL) return;

  const CONTEXT* uc = (const CONTEXT*)context;

  st->print_cr("Register to memory mapping:");
  st->cr();

  // this is only for the "general purpose" registers

#ifdef AMD64
  st->print("RIP="); print_location(st, uc->Rip);
  st->print("RAX="); print_location(st, uc->Rax);
  st->print("RBX="); print_location(st, uc->Rbx);
  st->print("RCX="); print_location(st, uc->Rcx);
  st->print("RDX="); print_location(st, uc->Rdx);
  st->print("RSP="); print_location(st, uc->Rsp);
  st->print("RBP="); print_location(st, uc->Rbp);
  st->print("RSI="); print_location(st, uc->Rsi);
  st->print("RDI="); print_location(st, uc->Rdi);
  st->print("R8 ="); print_location(st, uc->R8);
  st->print("R9 ="); print_location(st, uc->R9);
  st->print("R10="); print_location(st, uc->R10);
  st->print("R11="); print_location(st, uc->R11);
  st->print("R12="); print_location(st, uc->R12);
  st->print("R13="); print_location(st, uc->R13);
  st->print("R14="); print_location(st, uc->R14);
  st->print("R15="); print_location(st, uc->R15);
#else
  st->print("EIP="); print_location(st, uc->Eip);
  st->print("EAX="); print_location(st, uc->Eax);
  st->print("EBX="); print_location(st, uc->Ebx);
  st->print("ECX="); print_location(st, uc->Ecx);
  st->print("EDX="); print_location(st, uc->Edx);
  st->print("ESP="); print_location(st, uc->Esp);
  st->print("EBP="); print_location(st, uc->Ebp);
  st->print("ESI="); print_location(st, uc->Esi);
  st->print("EDI="); print_location(st, uc->Edi);
#endif

  st->cr();
}

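// Hint to the processor that we are busy-waiting. Returns 1 if a pause
// instruction was executed, 0 if this is a no-op.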
extern "C" int SpinPause() {
#ifdef AMD64
   return 0;
#else
   // pause is encoded as rep;nop. On processors that do not support
   // pause, rep;nop executes as a plain nop -- the rep prefix is ignored.
   _asm {
      pause;
   };
   return 1;
#endif // AMD64
}


void os::setup_fpu() {
#ifndef AMD64
  int fpu_cntrl_word = StubRoutines::fpu_cntrl_wrd_std();
  __asm fldcw fpu_cntrl_word;  // load the VM's standard x87 FPU control word
#endif // !AMD64
}

#ifndef PRODUCT
void os::verify_stack_alignment() {
#ifdef AMD64
  // current_stack_pointer() calls the generated get_previous_sp stub routine;
  // only enable the assert once that routine is available.
  if (StubRoutines::code1() != NULL) {
    assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
  }
#endif
}
#endif

int os::extra_bang_size_in_bytes() {
  // JDK-8050147 requires the full cache line bang for x86.
  return VM_Version::L1_line_size();
}