src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp

 307   return (address)uc->context_pc;
 308 }
 309 
 310 void os::Bsd::ucontext_set_pc(ucontext_t * uc, address pc) {
 311   uc->context_pc = (intptr_t)pc;
 312 }
 313 
 314 intptr_t* os::Bsd::ucontext_get_sp(ucontext_t * uc) {
 315   return (intptr_t*)uc->context_sp;
 316 }
 317 
 318 intptr_t* os::Bsd::ucontext_get_fp(ucontext_t * uc) {
 319   return (intptr_t*)uc->context_fp;
 320 }
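// (Editorial sketch, not part of the original file: context_pc, context_sp and
//  context_fp above are macros defined earlier in os_bsd_x86.cpp, mapping onto
//  each BSD flavor's ucontext layout. The field names below are an assumption
//  for x86_64 macOS and FreeBSD, shown for illustration only.)
//
//    #ifdef __APPLE__            // mcontext_t is a pointer on macOS
//      #define context_pc uc_mcontext->__ss.__rip
//      #define context_sp uc_mcontext->__ss.__rsp
//      #define context_fp uc_mcontext->__ss.__rbp
//    #elif defined(__FreeBSD__)  // mcontext_t is embedded by value on FreeBSD
//      #define context_pc uc_mcontext.mc_rip
//      #define context_sp uc_mcontext.mc_rsp
//      #define context_fp uc_mcontext.mc_rbp
//    #endif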
 321 
 322 // For Forte Analyzer AsyncGetCallTrace profiling support - thread
 323 // is currently interrupted by SIGPROF.
 324 // os::Solaris::fetch_frame_from_ucontext() tries to skip nested signal
 325 // frames. Currently we don't do that on Bsd, so it's the same as
 326 // os::fetch_frame_from_context().

 327 ExtendedPC os::Bsd::fetch_frame_from_ucontext(Thread* thread,
 328   ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {
 329 
 330   assert(thread != NULL, "just checking");
 331   assert(ret_sp != NULL, "just checking");
 332   assert(ret_fp != NULL, "just checking");
 333 
 334   return os::fetch_frame_from_context(uc, ret_sp, ret_fp);
 335 }
 336 
 337 ExtendedPC os::fetch_frame_from_context(void* ucVoid,
 338                     intptr_t** ret_sp, intptr_t** ret_fp) {
 339 
 340   ExtendedPC  epc;
 341   ucontext_t* uc = (ucontext_t*)ucVoid;
 342 
 343   if (uc != NULL) {
 344     epc = ExtendedPC(os::Bsd::ucontext_get_pc(uc));
 345     if (ret_sp) *ret_sp = os::Bsd::ucontext_get_sp(uc);
 346     if (ret_fp) *ret_fp = os::Bsd::ucontext_get_fp(uc);
 347   } else {
 348     // construct empty ExtendedPC for return value checking
 349     epc = ExtendedPC(NULL);
 350     if (ret_sp) *ret_sp = (intptr_t *)NULL;
 351     if (ret_fp) *ret_fp = (intptr_t *)NULL;
 352   }
 353 
 354   return epc;
 355 }
 356 
 357 frame os::fetch_frame_from_context(void* ucVoid) {
 358   intptr_t* sp;
 359   intptr_t* fp;
 360   ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
 361   return frame(sp, fp, epc.pc());
 362 }
 363 
 364 // By default, gcc always saves the frame pointer (%ebp/%rbp) on the stack. It may
 365 // get turned off by -fomit-frame-pointer.
 366 frame os::get_sender_for_C_frame(frame* fr) {
 367   return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
 368 }
 369 
 370 intptr_t* _get_previous_fp() {
 371 #if defined(SPARC_WORKS) || defined(__clang__) || defined(__llvm__)
 372   register intptr_t **ebp;
 373   __asm__("mov %%"SPELL_REG_FP", %0":"=r"(ebp));
 374 #else
 375   register intptr_t **ebp __asm__ (SPELL_REG_FP);
 376 #endif
 377   return (intptr_t*) *ebp;   // we want what it points to.
 378 }
 379 
 380 
 381 frame os::current_frame() {
 382   intptr_t* fp = _get_previous_fp();
 383   frame myframe((intptr_t*)os::current_stack_pointer(),


 461   address pc          = NULL;
 462 
 463   //%note os_trap_1
 464   if (info != NULL && uc != NULL && thread != NULL) {
 465     pc = (address) os::Bsd::ucontext_get_pc(uc);
 466 
 467     if (StubRoutines::is_safefetch_fault(pc)) {
 468       os::Bsd::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
 469       return 1;
 470     }
 471 
 472     // Handle ALL stack overflow variations here
 473     if (sig == SIGSEGV || sig == SIGBUS) {
 474       address addr = (address) info->si_addr;
 475 
 476       // check if fault address is within thread stack
 477       if (addr < thread->stack_base() &&
 478           addr >= thread->stack_base() - thread->stack_size()) {
 479         // stack overflow
 480         if (thread->in_stack_yellow_zone(addr)) {
 481           thread->disable_stack_yellow_zone();
 482           if (thread->thread_state() == _thread_in_Java) {
 483             // Throw a stack overflow exception.  Guard pages will be reenabled
 484             // while unwinding the stack.
 485             stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
 486           } else {
 487             // Thread was in the vm or native code.  Return and try to finish.
 488             return 1;
 489           }
 490         } else if (thread->in_stack_red_zone(addr)) {
 491           // Fatal red zone violation.  Disable the guard pages and fall through
 492           // to handle_unexpected_exception way down below.
 493           thread->disable_stack_red_zone();
 494           tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
 495         }
 496       }
 497     }
 498 
 499     if ((sig == SIGSEGV || sig == SIGBUS) && VM_Version::is_cpuinfo_segv_addr(pc)) {
 500       // Verify that the OS saves/restores the AVX registers.
 501       stub = VM_Version::cpuinfo_cont_addr();
 502     }
 503 
 504     // We test if stub is already set (by the stack overflow code
 505     // above) so it is not overwritten by the code that follows. This
 506     // check is not required on other platforms, because on other
 507     // platforms we check for SIGSEGV only or SIGBUS only, whereas here




 307   return (address)uc->context_pc;
 308 }
 309 
 310 void os::Bsd::ucontext_set_pc(ucontext_t * uc, address pc) {
 311   uc->context_pc = (intptr_t)pc;
 312 }
 313 
 314 intptr_t* os::Bsd::ucontext_get_sp(ucontext_t * uc) {
 315   return (intptr_t*)uc->context_sp;
 316 }
 317 
 318 intptr_t* os::Bsd::ucontext_get_fp(ucontext_t * uc) {
 319   return (intptr_t*)uc->context_fp;
 320 }
 321 
 322 // For Forte Analyzer AsyncGetCallTrace profiling support - thread
 323 // is currently interrupted by SIGPROF.
 324 // os::Solaris::fetch_frame_from_ucontext() tries to skip nested signal
 325 // frames. Currently we don't do that on Bsd, so it's the same as
 326 // os::fetch_frame_from_context().
 327 // This method is also used for stack overflow signal handling.
 328 ExtendedPC os::Bsd::fetch_frame_from_ucontext(Thread* thread,
 329   ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {
 330 
 331   assert(thread != NULL, "just checking");
 332   assert(ret_sp != NULL, "just checking");
 333   assert(ret_fp != NULL, "just checking");
 334 
 335   return os::fetch_frame_from_context(uc, ret_sp, ret_fp);
 336 }
 337 
 338 ExtendedPC os::fetch_frame_from_context(void* ucVoid,
 339                     intptr_t** ret_sp, intptr_t** ret_fp) {
 340 
 341   ExtendedPC  epc;
 342   ucontext_t* uc = (ucontext_t*)ucVoid;
 343 
 344   if (uc != NULL) {
 345     epc = ExtendedPC(os::Bsd::ucontext_get_pc(uc));
 346     if (ret_sp) *ret_sp = os::Bsd::ucontext_get_sp(uc);
 347     if (ret_fp) *ret_fp = os::Bsd::ucontext_get_fp(uc);
 348   } else {
 349     // construct empty ExtendedPC for return value checking
 350     epc = ExtendedPC(NULL);
 351     if (ret_sp) *ret_sp = (intptr_t *)NULL;
 352     if (ret_fp) *ret_fp = (intptr_t *)NULL;
 353   }
 354 
 355   return epc;
 356 }
 357 
 358 frame os::fetch_frame_from_context(void* ucVoid) {
 359   intptr_t* sp;
 360   intptr_t* fp;
 361   ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
 362   return frame(sp, fp, epc.pc());
 363 }
 364 
 365 frame os::fetch_frame_from_ucontext(Thread* thread, void* ucVoid) {
 366   intptr_t* sp;
 367   intptr_t* fp;
 368   ExtendedPC epc = os::Bsd::fetch_frame_from_ucontext(thread, (ucontext_t*)ucVoid, &sp, &fp);
 369   return frame(sp, fp, epc.pc());
 370 }
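// (Editorial sketch, not part of the original file: a minimal illustration of
//  how a SIGPROF-style profiler handler could seed its stack walk from the
//  interrupted ucontext using the helpers above. The handler itself and the
//  way the current Thread* is obtained are assumptions, not HotSpot API.)
static void profiler_handler_sketch(int sig, siginfo_t* info, void* ucVoid,
                                    Thread* thread /* assumed to be known */) {
  (void)sig; (void)info;
  intptr_t* sp;
  intptr_t* fp;
  ExtendedPC epc =
      os::Bsd::fetch_frame_from_ucontext(thread, (ucontext_t*)ucVoid, &sp, &fp);
  if (epc.pc() == NULL) return;   // a NULL context yields an empty ExtendedPC
  frame top(sp, fp, epc.pc());    // same construction as os::fetch_frame_from_ucontext
  // ... hand 'top' to the stack walker / AsyncGetCallTrace machinery ...
}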
 371 
 372 bool os::Bsd::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) {
 373   address pc = (address) os::Bsd::ucontext_get_pc(uc);
 374   if (Interpreter::contains(pc)) {
 375     // The interpreter performs stack banging after the fixed frame header has
 376     // been generated, while the compilers perform it before. To maintain
 377     // semantic consistency between interpreted and compiled frames, the
 378     // method returns the Java sender of the current frame.
 379     *fr = os::fetch_frame_from_ucontext(thread, uc);
 380     if (!fr->is_first_java_frame()) {
 381       assert(fr->safe_for_sender(thread), "Safety check");
 382       *fr = fr->java_sender();
 383     }
 384   } else {
 385     // More complex handling for compiled code
 386     assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
 387     CodeBlob* cb = CodeCache::find_blob(pc);
 388     if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
 389       // Not sure where the pc points to; fall back to the default
 390       // stack overflow handling
 391       return false;
 392     } else {
 393       *fr = os::fetch_frame_from_ucontext(thread, uc);
 394       // in compiled code, the stack banging is performed just after the return pc
 395       // has been pushed on the stack
 396       *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
 397       if (!fr->is_java_frame()) {
 398         assert(fr->safe_for_sender(thread), "Safety check");
 399         *fr = fr->java_sender();
 400       }
 401     }
 402   }
 403   assert(fr->is_java_frame(), "Safety check");
 404   return true;
 405 }
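// (Editorial note, not part of the original file: the frame(fr->sp() + 1, ...)
//  reconstruction above relies on the x86 layout at the compiled-code banging
//  point, where only the call instruction has executed so far:
//
//      sp[1], sp[2], ...   the caller's frame            -> sp + 1 becomes the new sp
//      sp[0]               return pc pushed by the call  -> *(fr->sp()) becomes the new pc
//
//  and fr->fp() is still the caller's frame pointer, so the rebuilt frame is
//  the caller's frame as it looked before the callee set up its own frame.)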
 406 
 407 // By default, gcc always saves the frame pointer (%ebp/%rbp) on the stack. It may
 408 // get turned off by -fomit-frame-pointer.
 409 frame os::get_sender_for_C_frame(frame* fr) {
 410   return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
 411 }
 412 
 413 intptr_t* _get_previous_fp() {
 414 #if defined(SPARC_WORKS) || defined(__clang__) || defined(__llvm__)
 415   register intptr_t **ebp;
 416   __asm__("mov %%"SPELL_REG_FP", %0":"=r"(ebp));
 417 #else
 418   register intptr_t **ebp __asm__ (SPELL_REG_FP);
 419 #endif
 420   return (intptr_t*) *ebp;   // we want what it points to.
 421 }
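// (Editorial sketch, not part of the original file: outside HotSpot the same
//  frame-pointer chain can be walked with a compiler builtin. A minimal sketch,
//  assuming code built without -fomit-frame-pointer and the conventional x86
//  layout where fp[0] holds the caller's saved fp and fp[1] the return pc.)
#include <stdio.h>
static void walk_fp_chain_sketch() {
  intptr_t** fp = (intptr_t**)__builtin_frame_address(0);
  for (int depth = 0; fp != NULL && depth < 16; depth++) {
    printf("#%d  pc=%p  fp=%p\n", depth, (void*)fp[1], (void*)fp);
    intptr_t** next = (intptr_t**)fp[0];  // follow the saved frame pointer
    if (next <= fp) break;                // callers live at higher addresses; stop on nonsense
    fp = next;
  }
}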
 422 
 423 
 424 frame os::current_frame() {
 425   intptr_t* fp = _get_previous_fp();
 426   frame myframe((intptr_t*)os::current_stack_pointer(),


 504   address pc          = NULL;
 505 
 506   //%note os_trap_1
 507   if (info != NULL && uc != NULL && thread != NULL) {
 508     pc = (address) os::Bsd::ucontext_get_pc(uc);
 509 
 510     if (StubRoutines::is_safefetch_fault(pc)) {
 511       os::Bsd::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
 512       return 1;
 513     }
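    // (Editorial note, not part of the original handler: this path serves the
    //  SafeFetch32/SafeFetchN stubs, which deliberately load from memory that
    //  may be unmapped, e.g. a hypothetical probe
    //      int v = SafeFetch32((int*)addr, -1);
    //  If the load faults, the saved pc is rewritten above to the stub's
    //  continuation, so the probe simply returns the error value instead of
    //  bringing down the VM.)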
 514 
 515     // Handle ALL stack overflow variations here
 516     if (sig == SIGSEGV || sig == SIGBUS) {
 517       address addr = (address) info->si_addr;
 518 
 519       // check if fault address is within thread stack
 520       if (addr < thread->stack_base() &&
 521           addr >= thread->stack_base() - thread->stack_size()) {
 522         // stack overflow
 523         if (thread->in_stack_yellow_zone(addr)) {
 524           if (thread->thread_state() == _thread_in_Java) {
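            // (Editorial note, not part of the original handler: the reserved
            //  zone checked next sits just above the yellow zone (JEP 270).
            //  If an activation of a @ReservedStackAccess-annotated method is
            //  found on the stack, the zone is temporarily disabled and a
            //  watermark recorded via set_reserved_stack_activation(), letting
            //  the critical section finish; otherwise execution falls through
            //  to the ordinary StackOverflowError path below.)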
 525             if (thread->in_stack_reserved_zone(addr)) {
 526               frame fr;
 527               if (os::Bsd::get_frame_at_stack_banging_point(thread, uc, &fr)) {
 528                 assert(fr.is_java_frame(), "Must be a Java frame");
 529                 frame activation = SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
 530                 if (activation.sp() != NULL) {
 531                   thread->disable_stack_reserved_zone();
 532                   if (activation.is_interpreted_frame()) {
 533                     thread->set_reserved_stack_activation((address)(
 534                       activation.fp() + frame::interpreter_frame_initial_sp_offset));
 535                   } else {
 536                     thread->set_reserved_stack_activation((address)activation.unextended_sp());
 537                   }
 538                   return 1;
 539                 }
 540               }
 541             }
 542             // Throw a stack overflow exception.  Guard pages will be reenabled
 543             // while unwinding the stack.
 544             thread->disable_stack_yellow_zone();
 545             stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
 546           } else {
 547             // Thread was in the vm or native code.  Return and try to finish.
 548             thread->disable_stack_yellow_zone();
 549             return 1;
 550           }
 551         } else if (thread->in_stack_red_zone(addr)) {
 552           // Fatal red zone violation.  Disable the guard pages and fall through
 553           // to handle_unexpected_exception way down below.
 554           thread->disable_stack_red_zone();
 555           tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
 556         }
 557       }
 558     }
 559 
 560     if ((sig == SIGSEGV || sig == SIGBUS) && VM_Version::is_cpuinfo_segv_addr(pc)) {
 561       // Verify that the OS saves/restores the AVX registers.
 562       stub = VM_Version::cpuinfo_cont_addr();
 563     }
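    // (Editorial note, not part of the original handler: VM_Version's CPU-info
    //  stub faults on purpose at a known pc after filling the AVX registers;
    //  resuming at cpuinfo_cont_addr() lets that stub check whether the ymm
    //  state survived the signal, i.e. whether the OS really saves and
    //  restores AVX state as the comment above says.)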
 564 
 565     // We test if stub is already set (by the stack overflow code
 566     // above) so it is not overwritten by the code that follows. This
 567     // check is not required on other platforms, because on other
 568     // platforms we check for SIGSEGV only or SIGBUS only, whereas here