src/os_cpu/linux_x86/vm/os_linux_x86.cpp

 157     epc = ExtendedPC(os::Linux::ucontext_get_pc(uc));
 158     if (ret_sp) *ret_sp = os::Linux::ucontext_get_sp(uc);
 159     if (ret_fp) *ret_fp = os::Linux::ucontext_get_fp(uc);
 160   } else {
 161     // construct empty ExtendedPC for return value checking
 162     epc = ExtendedPC(NULL);
 163     if (ret_sp) *ret_sp = (intptr_t *)NULL;
 164     if (ret_fp) *ret_fp = (intptr_t *)NULL;
 165   }
 166 
 167   return epc;
 168 }
 169 
 170 frame os::fetch_frame_from_context(void* ucVoid) {
 171   intptr_t* sp;
 172   intptr_t* fp;
 173   ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
 174   return frame(sp, fp, epc.pc());
 175 }
 176 
 177 // By default, gcc always saves the frame pointer (%ebp/%rbp) on the stack.
 178 // It may be turned off by -fomit-frame-pointer.
 179 frame os::get_sender_for_C_frame(frame* fr) {
 180   return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
 181 }
 182 
 183 intptr_t* _get_previous_fp() {
 184 #ifdef SPARC_WORKS
 185   register intptr_t **ebp;
 186   __asm__("mov %%"SPELL_REG_FP", %0":"=r"(ebp));
 187 #elif defined(__clang__)
 188   intptr_t **ebp;
 189   __asm__ __volatile__ ("mov %%"SPELL_REG_FP", %0":"=r"(ebp):);
 190 #else
 191   register intptr_t **ebp __asm__ (SPELL_REG_FP);
 192 #endif
 193   return (intptr_t*) *ebp;   // we want what it points to.
 194 }
 195 
 196 


 287 
 288 #ifndef AMD64
 289     // Halt if SI_KERNEL before more crashes get misdiagnosed as Java bugs.
 290     // This can happen in any running code (currently more frequently in
 291     // interpreter code, but it has been seen in compiled code as well)
 292     if (sig == SIGSEGV && info->si_addr == 0 && info->si_code == SI_KERNEL) {
 293       fatal("An irrecoverable SI_KERNEL SIGSEGV has occurred due "
 294             "to unstable signal handling in this distribution.");
 295     }
 296 #endif // AMD64
 297 
 298     // Handle ALL stack overflow variations here
 299     if (sig == SIGSEGV) {
 300       address addr = (address) info->si_addr;
 301 
 302       // check if fault address is within thread stack
 303       if (addr < thread->stack_base() &&
 304           addr >= thread->stack_base() - thread->stack_size()) {
 305         // stack overflow
 306         if (thread->in_stack_yellow_zone(addr)) {
 307           thread->disable_stack_yellow_zone();
 308           if (thread->thread_state() == _thread_in_Java) {
 309             // Throw a stack overflow exception.  Guard pages will be reenabled
 310             // while unwinding the stack.
 311             stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
 312           } else {
 313             // Thread was in the vm or native code.  Return and try to finish.
 314             return 1;
 315           }
 316         } else if (thread->in_stack_red_zone(addr)) {
 317           // Fatal red zone violation.  Disable the guard pages and fall through
 318           // to handle_unexpected_exception way down below.
 319           thread->disable_stack_red_zone();
 320           tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
 321 
 322           // This is a likely cause, but hard to verify. Let's just print
 323           // it as a hint.
 324           tty->print_raw_cr("Please check whether any of your loaded .so files has "
 325                             "an executable stack enabled (see the execstack(8) man page)");
 326         } else {
 327           // Accessing a stack address below sp may cause a SEGV if the current
 328           // thread has a MAP_GROWSDOWN stack. This should only happen when the
 329           // current thread was created by user code with the MAP_GROWSDOWN flag
 330           // and then attached to the VM. See notes in os_linux.cpp.
 331           if (thread->osthread()->expanding_stack() == 0) {
 332              thread->osthread()->set_expanding_stack();
 333              if (os::Linux::manually_expand_stack(thread, addr)) {


 850  */
 851 void os::workaround_expand_exec_shield_cs_limit() {
 852 #if defined(IA32)
 853   size_t page_size = os::vm_page_size();
 854   /*
 855    * Take the highest VA the OS will give us and exec code from it.
 856    *
 857    * Although using -(pagesz) as the mmap hint works on newer kernels as expected,
 858    * the older variants affected by this work-around don't (they search forward only).
 859    *
 860    * On the affected distributions, we understand the memory layout to be:
 861    *
 862    *   TASK_LIMIT = 3G, main stack base close to TASK_LIMIT.
 863    *
 864    * A few pages south of the main stack will do it.
 865    *
 866    * If we are embedded in an app other than the launcher (initial != main stack),
 867    * we don't have much control or understanding of the address space, so just let it slide.
 868    */
 869   char* hint = (char*) (Linux::initial_thread_stack_bottom() -
 870                         ((StackYellowPages + StackRedPages + 1) * page_size));
 871   char* codebuf = os::attempt_reserve_memory_at(page_size, hint);
 872   if ( (codebuf == NULL) || (!os::commit_memory(codebuf, page_size, true)) ) {
 873     return; // No matter, we tried, best effort.
 874   }
 875 
 876   MemTracker::record_virtual_memory_type((address)codebuf, mtInternal);
 877 
 878   if (PrintMiscellaneous && (Verbose || WizardMode)) {
 879      tty->print_cr("[CS limit NX emulation work-around, exec code at: %p]", codebuf);
 880   }
 881 
 882   // Some code to exec: the 'ret' instruction
 883   codebuf[0] = 0xC3;
 884 
 885   // Call the code in the codebuf
 886   __asm__ volatile("call *%0" : : "r"(codebuf));
 887 
 888   // keep the page mapped so CS limit isn't reduced.
 889 #endif
 890 }


 157     epc = ExtendedPC(os::Linux::ucontext_get_pc(uc));
 158     if (ret_sp) *ret_sp = os::Linux::ucontext_get_sp(uc);
 159     if (ret_fp) *ret_fp = os::Linux::ucontext_get_fp(uc);
 160   } else {
 161     // construct empty ExtendedPC for return value checking
 162     epc = ExtendedPC(NULL);
 163     if (ret_sp) *ret_sp = (intptr_t *)NULL;
 164     if (ret_fp) *ret_fp = (intptr_t *)NULL;
 165   }
 166 
 167   return epc;
 168 }
 169 
 170 frame os::fetch_frame_from_context(void* ucVoid) {
 171   intptr_t* sp;
 172   intptr_t* fp;
 173   ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
 174   return frame(sp, fp, epc.pc());
 175 }
 176 
 177 frame os::fetch_frame_from_ucontext(Thread* thread, void* ucVoid) {
 178   intptr_t* sp;
 179   intptr_t* fp;
 180   ExtendedPC epc = os::Linux::fetch_frame_from_ucontext(thread, (ucontext_t*)ucVoid, &sp, &fp);
 181   return frame(sp, fp, epc.pc());
 182 }
 183 
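The three fetchers above differ only in where sp/fp/pc come from. As a minimal sketch of how the ucontext handed to a signal handler flows into a walkable frame (the handler name and its sigaction(SA_SIGINFO) registration are hypothetical, not part of this file):

static void diag_handler(int sig, siginfo_t* info, void* ucVoid) {
  if (ucVoid != NULL) {
    frame fr = os::fetch_frame_from_context(ucVoid);
    // fr.pc(), fr.sp() and fr.fp() now describe the interrupted frame and
    // can seed a native walk via os::get_sender_for_C_frame() below.
  }
}
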
 184 bool os::Linux::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) {
 185   address pc = (address) os::Linux::ucontext_get_pc(uc);
 186   if (Interpreter::contains(pc)) {
 187     // The interpreter performs stack banging after the fixed frame header has
 188     // been generated, while the compilers perform it before. To maintain
 189     // semantic consistency between interpreted and compiled frames, this
 190     // method returns the Java sender of the current frame.
 191     *fr = os::fetch_frame_from_ucontext(thread, uc);
 192     assert(fr->safe_for_sender(thread), "Safety check");
 193     if (!fr->is_first_java_frame()) {
 194       *fr = fr->java_sender();
 195     }
 196   } else {
 197     // More complex handling for compiled code
 198     assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
 199     CodeBlob* cb = CodeCache::find_blob(pc);
 200     if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
 201       // Not sure where the pc points to; fall back to the default
 202       // stack overflow handling.
 203       return false;
 204     } else {
 205       // in compiled code, the stack banging is performed just after the return pc
 206       // has been pushed on the stack
 207       intptr_t* fp = os::Linux::ucontext_get_fp(uc);
 208       intptr_t* sp = os::Linux::ucontext_get_sp(uc);
 209       *fr = frame(sp + 1, fp, (address)*sp);
 210       if (!fr->is_java_frame()) {
 211         assert(fr->safe_for_sender(thread), "Safety check");
 212         assert(!fr->is_first_frame(), "Safety check");
 213         *fr = fr->java_sender();
 214       }
 215     }
 216   }
 217   assert(fr->is_java_frame(), "Safety check");
 218   return true;
 219 }
 220 
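The reconstruction frame(sp + 1, fp, (address)*sp) above leans on the x86 calling convention at the banging point: the call into the compiled method has pushed the return pc, but the callee's prologue has not run yet. A sketch of the stack at that instant (one word per slot, stack grows down):

  //  higher addresses
  //  | caller's frame ...           <- sp + 1  (sender sp)
  //  | return pc pushed by 'call'   <- sp      (sender pc = *sp)
  //  fp still points into the caller's frame and serves as the link.
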
 221 // By default, gcc always saves the frame pointer (%ebp/%rbp) on the stack.
 222 // It may be turned off by -fomit-frame-pointer.
 223 frame os::get_sender_for_C_frame(frame* fr) {
 224   return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
 225 }
 226 
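As long as the frame pointer is kept, get_sender_for_C_frame() can drive a simple native stack walk. A hedged sketch; the function name and loop bound are illustrative only:

static void print_native_backtrace_sketch(frame fr, int max_frames) {
  for (int i = 0; i < max_frames && fr.pc() != NULL; i++) {
    tty->print_cr("  [%d] pc=%p sp=%p", i, fr.pc(), fr.sp());
    fr = os::get_sender_for_C_frame(&fr);  // follow the saved %ebp/%rbp chain
  }
}
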
 227 intptr_t* _get_previous_fp() {
 228 #ifdef SPARC_WORKS
 229   register intptr_t **ebp;
 230   __asm__("mov %%"SPELL_REG_FP", %0":"=r"(ebp));
 231 #elif defined(__clang__)
 232   intptr_t **ebp;
 233   __asm__ __volatile__ ("mov %%"SPELL_REG_FP", %0":"=r"(ebp):);
 234 #else
 235   register intptr_t **ebp __asm__ (SPELL_REG_FP);
 236 #endif
 237   return (intptr_t*) *ebp;   // we want what it points to.
 238 }
 239 
 240 
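_get_previous_fp() reads the current frame pointer via inline asm and dereferences it, yielding the fp of its caller. A hedged sketch of a typical caller, mirroring the HotSpot pattern (the file's real user of this helper falls outside this excerpt):

static frame sketch_current_frame() {
  intptr_t* fp = _get_previous_fp();                     // fp of this function
  frame myframe((intptr_t*)os::current_stack_pointer(),  // sp of this function
                fp,
                CAST_FROM_FN_PTR(address, sketch_current_frame));
  return os::get_sender_for_C_frame(&myframe);           // our caller's frame
}
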


 331 
 332 #ifndef AMD64
 333     // Halt if SI_KERNEL before more crashes get misdiagnosed as Java bugs.
 334     // This can happen in any running code (currently more frequently in
 335     // interpreter code, but it has been seen in compiled code as well)
 336     if (sig == SIGSEGV && info->si_addr == 0 && info->si_code == SI_KERNEL) {
 337       fatal("An irrecoverable SI_KERNEL SIGSEGV has occurred due "
 338             "to unstable signal handling in this distribution.");
 339     }
 340 #endif // AMD64
 341 
 342     // Handle ALL stack overflow variations here
 343     if (sig == SIGSEGV) {
 344       address addr = (address) info->si_addr;
 345 
 346       // check if fault address is within thread stack
 347       if (addr < thread->stack_base() &&
 348           addr >= thread->stack_base() - thread->stack_size()) {
 349         // stack overflow
 350         if (thread->in_stack_yellow_zone(addr)) {
 351           if (thread->thread_state() == _thread_in_Java) {
 352             if (thread->in_stack_reserved_zone(addr)) {
 353               frame fr;
 354               if (os::Linux::get_frame_at_stack_banging_point(thread, uc, &fr)) {
 355                 assert(fr.is_java_frame(), "Must be a Java frame");
 356                 frame activation = 
 357                   SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
 358                 if (activation.sp() != NULL) {
 359                   thread->disable_stack_reserved_zone();
 360                   if (activation.is_interpreted_frame()) {
 361                     thread->set_reserved_stack_activation(
 362                       activation.fp() + frame::interpreter_frame_initial_sp_offset);
 363                   } else {
 364                     thread->set_reserved_stack_activation(activation.unextended_sp());
 365                   }
 366                   return 1;
 367                 }
 368               }
 369             }
 370             // Throw a stack overflow exception.  Guard pages will be reenabled
 371             // while unwinding the stack.
 372             thread->disable_stack_yellow_zone();
 373             stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
 374           } else {
 375             // Thread was in the vm or native code.  Return and try to finish.
 376             thread->disable_stack_yellow_zone();
 377             return 1;
 378           }
 379         } else if (thread->in_stack_red_zone(addr)) {
 380           // Fatal red zone violation.  Disable the guard pages and fall through
 381           // to handle_unexpected_exception way down below.
 382           thread->disable_stack_red_zone();
 383           tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
 384 
 385           // This is a likely cause, but hard to verify. Let's just print
 386           // it as a hint.
 387           tty->print_raw_cr("Please check whether any of your loaded .so files has "
 388                             "an executable stack enabled (see the execstack(8) man page)");
 389         } else {
 390           // Accessing a stack address below sp may cause a SEGV if the current
 391           // thread has a MAP_GROWSDOWN stack. This should only happen when the
 392           // current thread was created by user code with the MAP_GROWSDOWN flag
 393           // and then attached to the VM. See notes in os_linux.cpp.
 394           if (thread->osthread()->expanding_stack() == 0) {
 395              thread->osthread()->set_expanding_stack();
 396              if (os::Linux::manually_expand_stack(thread, addr)) {


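The zone tests in the handler above follow HotSpot's guard-page layout at the low end of each thread stack (x86 stacks grow downward); roughly:

  //  stack_base()                            high addresses
  //    |  usable stack ...
  //    |  reserved zone  (StackReservedPages) -- released for methods annotated
  //    |                                         @ReservedStackAccess so they can finish
  //    |  yellow zone    (StackYellowPages)   -- recoverable, throws StackOverflowError
  //    |  red zone       (StackRedPages)      -- fatal, VM aborts
  //  stack_base() - stack_size()             low addresses
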
 913  */
 914 void os::workaround_expand_exec_shield_cs_limit() {
 915 #if defined(IA32)
 916   size_t page_size = os::vm_page_size();
 917   /*
 918    * Take the highest VA the OS will give us and exec code from it.
 919    *
 920    * Although using -(pagesz) as the mmap hint works on newer kernels as expected,
 921    * the older variants affected by this work-around don't (they search forward only).
 922    *
 923    * On the affected distributions, we understand the memory layout to be:
 924    *
 925    *   TASK_LIMIT = 3G, main stack base close to TASK_LIMIT.
 926    *
 927    * A few pages south of the main stack will do it.
 928    *
 929    * If we are embedded in an app other than the launcher (initial != main stack),
 930    * we don't have much control or understanding of the address space, so just let it slide.
 931    */
 932   char* hint = (char*) (Linux::initial_thread_stack_bottom() -
 933                         ((StackReservedPages + StackYellowPages + StackRedPages + 1) * page_size));
 934   char* codebuf = os::attempt_reserve_memory_at(page_size, hint);
 935   if ( (codebuf == NULL) || (!os::commit_memory(codebuf, page_size, true)) ) {
 936     return; // No matter, we tried, best effort.
 937   }
 938 
 939   MemTracker::record_virtual_memory_type((address)codebuf, mtInternal);
 940 
 941   if (PrintMiscellaneous && (Verbose || WizardMode)) {
 942      tty->print_cr("[CS limit NX emulation work-around, exec code at: %p]", codebuf);
 943   }
 944 
 945   // Some code to exec: the 'ret' instruction
 946   codebuf[0] = 0xC3;
 947 
 948   // Call the code in the codebuf
 949   __asm__ volatile("call *%0" : : "r"(codebuf));
 950 
 951   // keep the page mapped so CS limit isn't reduced.
 952 #endif
 953 }
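
For reference, the same trick with the os:: wrappers stripped away; a minimal standalone sketch assuming plain POSIX mmap (the stack-relative hint computation above is omitted here):

#include <sys/mman.h>

static void expand_cs_limit_sketch(void* hint, size_t page_size) {
  // Map one executable page as close to the hint as the kernel allows.
  char* codebuf = (char*) mmap(hint, page_size,
                               PROT_READ | PROT_WRITE | PROT_EXEC,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (codebuf == MAP_FAILED) return;  // best effort, as above
  codebuf[0] = (char) 0xC3;           // the 'ret' instruction
  __asm__ volatile("call *%0" : : "r"(codebuf));
  // Leave the page mapped so the emulated CS limit is not reduced again.
}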