
src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp

rev 7968 : 8074552:  SafeFetch32 and SafeFetchN do not work in error handling
Summary: handle SafeFetch faults in secondary signal handlers
Reviewed-by: dholmes
Contributed-by: Thomas Stuefe
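
SafeFetch32 and SafeFetchN are stub routines that read a 32-bit or pointer-sized value and return a caller-supplied error value if the read faults. The signal handler makes this work by recognizing a fault whose PC lies inside the SafeFetch stub (StubRoutines::is_safefetch_fault) and rewriting the saved PC in the ucontext to the stub's continuation (StubRoutines::continuation_for_safefetch_fault). This change factors that PC rewrite into a new helper, os::Bsd::ucontext_set_pc, so that (per the summary above) secondary signal handlers can apply the same redirect during error handling. The affected hunks are shown below first as they were before the change, then as they appear after it.

For orientation, here is a minimal, self-contained sketch of the same fault-tolerant-read idea. It is not HotSpot code: it uses sigsetjmp/siglongjmp instead of rewriting the saved PC to a stub continuation, and all names in it (safe_fetch32_sketch, g_fetch_env) are illustrative only.

  #include <setjmp.h>
  #include <signal.h>
  #include <stdio.h>

  static sigjmp_buf g_fetch_env;

  static void fetch_fault_handler(int) {
    // Jump back to the sigsetjmp point; savesigs=1 there means the original
    // signal mask is restored as well.
    siglongjmp(g_fetch_env, 1);
  }

  // Returns *adr, or err_value if reading *adr raises SIGSEGV or SIGBUS.
  static int safe_fetch32_sketch(const int* adr, int err_value) {
    struct sigaction sa = {}, old_segv, old_bus;
    sa.sa_handler = fetch_fault_handler;
    sigemptyset(&sa.sa_mask);
    sigaction(SIGSEGV, &sa, &old_segv);
    sigaction(SIGBUS,  &sa, &old_bus);    // BSD may report the fault as SIGBUS

    volatile int result = err_value;      // volatile: survives the siglongjmp
    if (sigsetjmp(g_fetch_env, 1) == 0) {
      result = *(const volatile int*) adr;   // the load that may fault
    }

    sigaction(SIGSEGV, &old_segv, NULL);
    sigaction(SIGBUS,  &old_bus,  NULL);
    return result;
  }

  int main() {
    int ok = 42;
    printf("%d\n", safe_fetch32_sketch(&ok, -1));             // prints 42
    printf("%d\n", safe_fetch32_sketch((const int*) 16, -1)); // prints -1
    return 0;
  }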


 292   return (address) esp;
 293 #endif
 294 }
 295 
 296 char* os::non_memory_address_word() {
 297   // Must never look like an address returned by reserve_memory,
 298   // even in its subfields (as defined by the CPU immediate fields,
 299   // if the CPU splits constants across multiple instructions).
 300 
 301   return (char*) -1;
 302 }
 303 
 304 void os::initialize_thread(Thread* thr) {
 305 // Nothing to do.
 306 }
 307 
 308 address os::Bsd::ucontext_get_pc(ucontext_t * uc) {
 309   return (address)uc->context_pc;
 310 }
 311 




 312 intptr_t* os::Bsd::ucontext_get_sp(ucontext_t * uc) {
 313   return (intptr_t*)uc->context_sp;
 314 }
 315 
 316 intptr_t* os::Bsd::ucontext_get_fp(ucontext_t * uc) {
 317   return (intptr_t*)uc->context_fp;
 318 }
 319 
 320 // For Forte Analyzer AsyncGetCallTrace profiling support - thread
 321 // is currently interrupted by SIGPROF.
 322 // os::Solaris::fetch_frame_from_ucontext() tries to skip nested signal
 323 // frames. Currently we don't do that on Bsd, so it's the same as
 324 // os::fetch_frame_from_context().
 325 ExtendedPC os::Bsd::fetch_frame_from_ucontext(Thread* thread,
 326   ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {
 327 
 328   assert(thread != NULL, "just checking");
 329   assert(ret_sp != NULL, "just checking");
 330   assert(ret_fp != NULL, "just checking");
 331 


 446   }
 447 /*
 448   NOTE: does not seem to work on bsd.
 449   if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) {
 450     // can't decode this kind of signal
 451     info = NULL;
 452   } else {
 453     assert(sig == info->si_signo, "bad siginfo");
 454   }
 455 */
 456   // decide if this trap can be handled by a stub
 457   address stub = NULL;
 458 
 459   address pc          = NULL;
 460 
 461   //%note os_trap_1
 462   if (info != NULL && uc != NULL && thread != NULL) {
 463     pc = (address) os::Bsd::ucontext_get_pc(uc);
 464 
 465     if (StubRoutines::is_safefetch_fault(pc)) {
 466       uc->context_pc = intptr_t(StubRoutines::continuation_for_safefetch_fault(pc));
 467       return 1;
 468     }
 469 
 470     // Handle ALL stack overflow variations here
 471     if (sig == SIGSEGV || sig == SIGBUS) {
 472       address addr = (address) info->si_addr;
 473 
 474       // check if fault address is within thread stack
 475       if (addr < thread->stack_base() &&
 476           addr >= thread->stack_base() - thread->stack_size()) {
 477         // stack overflow
 478         if (thread->in_stack_yellow_zone(addr)) {
 479           thread->disable_stack_yellow_zone();
 480           if (thread->thread_state() == _thread_in_Java) {
 481             // Throw a stack overflow exception.  Guard pages will be reenabled
 482             // while unwinding the stack.
 483             stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
 484           } else {
 485             // Thread was in the vm or native code.  Return and try to finish.
 486             return 1;


 686         // different addresses and failing to unguard the page, resulting in
 687         // an endless loop.  This condition is probably even more unlikely than
 688         // the first.
 689         //
 690         // Although both cases could be avoided by using locks or thread local
 691         // last_addr, these solutions are unnecessary complication: this
 692         // handler is a best-effort safety net, not a complete solution.  It is
 693         // disabled by default and should only be used as a workaround in case
 694         // we missed any no-execute-unsafe VM code.
 695 
 696         last_addr = addr;
 697       }
 698     }
 699   }
 700 #endif // !AMD64
 701 
 702   if (stub != NULL) {
 703     // save all thread context in case we need to restore it
 704     if (thread != NULL) thread->set_saved_exception_pc(pc);
 705 
 706     uc->context_pc = (intptr_t)stub;
 707     return true;
 708   }
 709 
 710   // signal-chaining
 711   if (os::Bsd::chained_handler(sig, info, ucVoid)) {
 712      return true;
 713   }
 714 
 715   if (!abort_if_unrecognized) {
 716     // caller wants another chance, so give it to him
 717     return false;
 718   }
 719 
 720   if (pc == NULL && uc != NULL) {
 721     pc = os::Bsd::ucontext_get_pc(uc);
 722   }
 723 
 724   // unmask current signal
 725   sigset_t newset;
 726   sigemptyset(&newset);




 292   return (address) esp;
 293 #endif
 294 }
 295 
 296 char* os::non_memory_address_word() {
 297   // Must never look like an address returned by reserve_memory,
 298   // even in its subfields (as defined by the CPU immediate fields,
 299   // if the CPU splits constants across multiple instructions).
 300 
 301   return (char*) -1;
 302 }
 303 
 304 void os::initialize_thread(Thread* thr) {
 305 // Nothing to do.
 306 }
 307 
 308 address os::Bsd::ucontext_get_pc(ucontext_t * uc) {
 309   return (address)uc->context_pc;
 310 }
 311 
 312 void os::Bsd::ucontext_set_pc(ucontext_t * uc, address pc) {
 313   uc->context_pc = (intptr_t)pc ;
 314 }
 315 
 316 intptr_t* os::Bsd::ucontext_get_sp(ucontext_t * uc) {
 317   return (intptr_t*)uc->context_sp;
 318 }
 319 
 320 intptr_t* os::Bsd::ucontext_get_fp(ucontext_t * uc) {
 321   return (intptr_t*)uc->context_fp;
 322 }
 323 
 324 // For Forte Analyzer AsyncGetCallTrace profiling support - thread
 325 // is currently interrupted by SIGPROF.
 326 // os::Solaris::fetch_frame_from_ucontext() tries to skip nested signal
 327 // frames. Currently we don't do that on Bsd, so it's the same as
 328 // os::fetch_frame_from_context().
 329 ExtendedPC os::Bsd::fetch_frame_from_ucontext(Thread* thread,
 330   ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {
 331 
 332   assert(thread != NULL, "just checking");
 333   assert(ret_sp != NULL, "just checking");
 334   assert(ret_fp != NULL, "just checking");
 335 


 450   }
 451 /*
 452   NOTE: does not seem to work on bsd.
 453   if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) {
 454     // can't decode this kind of signal
 455     info = NULL;
 456   } else {
 457     assert(sig == info->si_signo, "bad siginfo");
 458   }
 459 */
 460   // decide if this trap can be handled by a stub
 461   address stub = NULL;
 462 
 463   address pc          = NULL;
 464 
 465   //%note os_trap_1
 466   if (info != NULL && uc != NULL && thread != NULL) {
 467     pc = (address) os::Bsd::ucontext_get_pc(uc);
 468 
 469     if (StubRoutines::is_safefetch_fault(pc)) {
 470       os::Bsd::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
 471       return 1;
 472     }
 473 
 474     // Handle ALL stack overflow variations here
 475     if (sig == SIGSEGV || sig == SIGBUS) {
 476       address addr = (address) info->si_addr;
 477 
 478       // check if fault address is within thread stack
 479       if (addr < thread->stack_base() &&
 480           addr >= thread->stack_base() - thread->stack_size()) {
 481         // stack overflow
 482         if (thread->in_stack_yellow_zone(addr)) {
 483           thread->disable_stack_yellow_zone();
 484           if (thread->thread_state() == _thread_in_Java) {
 485             // Throw a stack overflow exception.  Guard pages will be reenabled
 486             // while unwinding the stack.
 487             stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
 488           } else {
 489             // Thread was in the vm or native code.  Return and try to finish.
 490             return 1;


 690         // different addresses and failing to unguard the page, resulting in
 691         // an endless loop.  This condition is probably even more unlikely than
 692         // the first.
 693         //
 694         // Although both cases could be avoided by using locks or thread local
 695         // last_addr, these solutions are unnecessary complication: this
 696         // handler is a best-effort safety net, not a complete solution.  It is
 697         // disabled by default and should only be used as a workaround in case
 698         // we missed any no-execute-unsafe VM code.
 699 
 700         last_addr = addr;
 701       }
 702     }
 703   }
 704 #endif // !AMD64
 705 
 706   if (stub != NULL) {
 707     // save all thread context in case we need to restore it
 708     if (thread != NULL) thread->set_saved_exception_pc(pc);
 709 
 710     os::Bsd::ucontext_set_pc(uc, stub);
 711     return true;
 712   }
 713 
 714   // signal-chaining
 715   if (os::Bsd::chained_handler(sig, info, ucVoid)) {
 716      return true;
 717   }
 718 
 719   if (!abort_if_unrecognized) {
 720     // caller wants another chance, so give it to him
 721     return false;
 722   }
 723 
 724   if (pc == NULL && uc != NULL) {
 725     pc = os::Bsd::ucontext_get_pc(uc);
 726   }
 727 
 728   // unmask current signal
 729   sigset_t newset;
 730   sigemptyset(&newset);
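
The reason os::Bsd::ucontext_set_pc is factored out (rather than leaving the direct uc->context_pc assignment in place) is that the same redirect must also be applied outside the main VM signal handler shown above, in the secondary signal handler installed while error reporting writes the hs_err file; that is what the summary at the top refers to. A hedged sketch of that pattern follows, using only the names visible in this file; the actual secondary handler lives in other files of this changeset, and secondary_crash_handler here is an illustrative name, not the real one.

  // Illustrative sketch only -- not the verbatim handler from this changeset.
  static void secondary_crash_handler(int sig, siginfo_t* info, void* ucVoid) {
    ucontext_t* uc = (ucontext_t*) ucVoid;
    if (uc != NULL) {
      address pc = os::Bsd::ucontext_get_pc(uc);
      if (StubRoutines::is_safefetch_fault(pc)) {
        // A SafeFetch32/SafeFetchN probe faulted during error reporting;
        // resume at the stub's continuation instead of crashing again.
        os::Bsd::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
        return;
      }
    }
    // ... otherwise continue with normal error reporting ...
  }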

