src/hotspot/os/windows/os_windows.cpp

2362   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2363   address addr = (address) exceptionRecord->ExceptionInformation[1];
2364   address pc = (address) exceptionInfo->ContextRecord->Rip;
2365 
2366   // Handle the case where we get an implicit exception in AOT generated
2367   // code.  Loaded AOT DLLs are not registered for structured exceptions.
2368   // If the exception occurred in the codeCache or AOT code, pass control
2369   // to our normal exception handler.
2370   CodeBlob* cb = CodeCache::find_blob(pc);
2371   if (cb != NULL) {
2372     return topLevelExceptionFilter(exceptionInfo);
2373   }
2374 
2375   return EXCEPTION_CONTINUE_SEARCH;
2376 }
2377 #endif
2378 
2379 //-----------------------------------------------------------------------------
2380 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2381   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2382   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;

2383 #ifdef _M_AMD64
2384   address pc = (address) exceptionInfo->ContextRecord->Rip;
2385 #else
2386   address pc = (address) exceptionInfo->ContextRecord->Eip;
2387 #endif
2388   Thread* t = Thread::current_or_null_safe();
2389 
2390   // Handle SafeFetch32 and SafeFetchN exceptions.
2391   if (StubRoutines::is_safefetch_fault(pc)) {
2392     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2393   }
2394 
2395 #ifndef _WIN64
2396   // Execution protection violation - win32 running on AMD64 only
2397   // Handled first to avoid misdiagnosis as a "normal" access violation.
2398   // This is safe to do because we have a new/unique ExceptionInformation
2399   // code for this condition.
2400   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2401     PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2402     int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2403     address addr = (address) exceptionRecord->ExceptionInformation[1];
2404 
2405     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2406       int page_size = os::vm_page_size();
2407 
2408       // Make sure the pc and the faulting address are sane.
2409       //
2410       // If an instruction spans a page boundary, and the page containing
2411       // the beginning of the instruction is executable but the following
2412       // page is not, the pc and the faulting address might be slightly
2413       // different - we still want to unguard the 2nd page in this case.
2414       //
2415       // 15 bytes seems to be a (very) safe value for max instruction size.
2416       bool pc_is_near_addr =
2417         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2418       bool instr_spans_page_boundary =
2419         (align_down((intptr_t) pc ^ (intptr_t) addr,
2420                          (intptr_t) page_size) > 0);
2421 
2422       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2423         static volatile address last_addr =

2447           //
2448           // The other race involves two threads alternately trapping at
2449           // different addresses and failing to unguard the page, resulting in
2450           // an endless loop.  This condition is probably even more unlikely
2451           // than the first.
2452           //
2453           // Although both cases could be avoided by using locks or thread
2454           // local last_addr, these solutions are unnecessary complication:
2455           // this handler is a best-effort safety net, not a complete solution.
2456           // It is disabled by default and should only be used as a workaround
2457           // in case we missed any no-execute-unsafe VM code.
2458 
2459           last_addr = addr;
2460 
2461           return EXCEPTION_CONTINUE_EXECUTION;
2462         }
2463       }
2464 
2465       // The last unguard failed, or we are not unguarding.
2466       tty->print_raw_cr("Execution protection violation");
2467       report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2468                    exceptionInfo->ContextRecord);
2469       return EXCEPTION_CONTINUE_SEARCH;
2470     }
2471   }
2472 #endif // _WIN64
2473 
2474   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2475       VM_Version::is_cpuinfo_segv_addr(pc)) {
2476     // Verify that the OS saves/restores AVX registers.
2477     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2478   }
2479 
2480   if (t != NULL && t->is_Java_thread()) {
2481     JavaThread* thread = (JavaThread*) t;
2482     bool in_java = thread->thread_state() == _thread_in_Java;


2483 
2484     // Handle potential stack overflows up front.
2485     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2486       if (thread->stack_guards_enabled()) {
2487         if (in_java) {
2488           frame fr;
2489           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2490           address addr = (address) exceptionRecord->ExceptionInformation[1];
2491           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2492             assert(fr.is_java_frame(), "Must be a Java frame");
2493             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2494           }
2495         }
2496         // Yellow zone violation.  The o/s has unprotected the first yellow
2497         // zone page for us.  Note:  must call disable_stack_yellow_reserved_zone to
2498         // update the enabled status, even if the zone contains only one page.
2499         assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
2500         thread->disable_stack_yellow_reserved_zone();
2501         // If not in Java code, return and hope for the best.
2502         return in_java
2503             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2504             :  EXCEPTION_CONTINUE_EXECUTION;
2505       } else {
2506         // Fatal red zone violation.
2507         thread->disable_stack_red_zone();
2508         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2509         report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2510                       exceptionInfo->ContextRecord);
2511         return EXCEPTION_CONTINUE_SEARCH;
2512       }
2513     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2514       // Either stack overflow or null pointer exception.
2515       if (in_java) {
2516         PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2517         address addr = (address) exceptionRecord->ExceptionInformation[1];
2518         address stack_end = thread->stack_end();
2519         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2520           // Stack overflow.
2521           assert(!os::uses_stack_guard_pages(),
2522                  "should be caught by red zone code above.");
2523           return Handle_Exception(exceptionInfo,
2524                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2525         }
2526         // Check for safepoint polling and implicit null checks.
2527         // We only expect null pointers in the stubs (vtable);
2528         // the rest are checked explicitly now.
2529         CodeBlob* cb = CodeCache::find_blob(pc);
2530         if (cb != NULL) {
2531           if (SafepointMechanism::is_poll_address(addr)) {
2532             address stub = SharedRuntime::get_poll_stub(pc);
2533             return Handle_Exception(exceptionInfo, stub);
2534           }
2535         }
2536         {
2537 #ifdef _WIN64
2538           // If it's a legal stack address, commit the region from here up to the stack base.
2539           //
2540           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2541           address addr = (address) exceptionRecord->ExceptionInformation[1];
2542           if (thread->is_in_usable_stack(addr)) {
2543             addr = (address)((uintptr_t)addr &
2544                              (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2545             os::commit_memory((char *)addr, thread->stack_base() - addr,
2546                               !ExecMem);
2547             return EXCEPTION_CONTINUE_EXECUTION;
2548           } else
2549 #endif
2550           {
2551             // Null pointer exception.
2552             if (MacroAssembler::uses_implicit_null_check((void*)addr)) {
2553               address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2554               if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2555             }
2556             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2557                          exceptionInfo->ContextRecord);
2558             return EXCEPTION_CONTINUE_SEARCH;
2559           }
2560         }



2561       }
2562 
2563 #ifdef _WIN64
2564       // Special care for fast JNI field accessors.
2565       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2566       // in and the heap gets shrunk before the field access.
2567       if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2568         address addr = JNI_FastGetField::find_slowcase_pc(pc);
2569         if (addr != (address)-1) {
2570           return Handle_Exception(exceptionInfo, addr);
2571         }
2572       }
2573 #endif
2574 
2575       // Stack overflow or null pointer exception in native code.
2576       report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2577                    exceptionInfo->ContextRecord);
2578       return EXCEPTION_CONTINUE_SEARCH;
2579     } // /EXCEPTION_ACCESS_VIOLATION
2580     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2581 
2582     if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
2583       CompiledMethod* nm = NULL;
2584       JavaThread* thread = (JavaThread*)t;
2585       if (in_java) {
2586         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
2587         nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
2588       }
2589 
2590       bool is_unsafe_arraycopy = (thread->thread_state() == _thread_in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
2591       if (((thread->thread_state() == _thread_in_vm ||
2592            thread->thread_state() == _thread_in_native ||
2593            is_unsafe_arraycopy) &&
2594           thread->doing_unsafe_access()) ||
2595           (nm != NULL && nm->has_unsafe_access())) {
2596         address next_pc =  Assembler::locate_next_instruction(pc);
2597         if (is_unsafe_arraycopy) {
2598           next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
2599         }
2600         return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, next_pc));
2601       }
2602     }
2603 
2604     if (in_java) {
2605       switch (exception_code) {
2606       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2607         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2608 
2609       case EXCEPTION_INT_OVERFLOW:
2610         return Handle_IDiv_Exception(exceptionInfo);
2611 
2612       } // switch
2613     }
2614     if (((thread->thread_state() == _thread_in_Java) ||
2615          (thread->thread_state() == _thread_in_native)) &&
2616          exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2617       LONG result = Handle_FLT_Exception(exceptionInfo);
2618       if (result == EXCEPTION_CONTINUE_EXECUTION) return result;
2619     }
2620   }
2621 
2622   if (exception_code != EXCEPTION_BREAKPOINT) {
2623     report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2624                  exceptionInfo->ContextRecord);
2625   }
2626   return EXCEPTION_CONTINUE_SEARCH;
2627 }
2628 
2629 #ifndef _WIN64
2630 // Special care for fast JNI accessors.
2631 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2632 // the heap gets shrunk before the field access.
2633 // Need to install our own structured exception handler since native code may
2634 // install its own.
2635 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2636   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2637   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2638     address pc = (address) exceptionInfo->ContextRecord->Eip;
2639     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2640     if (addr != (address)-1) {
2641       return Handle_Exception(exceptionInfo, addr);
2642     }
2643   }

2362   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2363   address addr = (address) exceptionRecord->ExceptionInformation[1];
2364   address pc = (address) exceptionInfo->ContextRecord->Rip;
2365 
2366   // Handle the case where we get an implicit exception in AOT generated
2367   // code.  Loaded AOT DLLs are not registered for structured exceptions.
2368   // If the exception occurred in the codeCache or AOT code, pass control
2369   // to our normal exception handler.
2370   CodeBlob* cb = CodeCache::find_blob(pc);
2371   if (cb != NULL) {
2372     return topLevelExceptionFilter(exceptionInfo);
2373   }
2374 
2375   return EXCEPTION_CONTINUE_SEARCH;
2376 }
2377 #endif
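
For context on how a filter with this WINAPI signature is typically invoked, the sketch below shows a common MSVC structured-exception-handling setup. It is an illustration only: the wrapper and thread-body names are hypothetical, and this is not code from this file.

#include <windows.h>

// Forward declaration of the filter defined below in this file.
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);

// Hypothetical thread body, for illustration only.
static void hypothetical_thread_body() { /* ... */ }

static DWORD WINAPI hypothetical_thread_entry(LPVOID /* arg */) {
  __try {
    hypothetical_thread_body();
  } __except (topLevelExceptionFilter(GetExceptionInformation())) {
    // Reached only if the filter returned EXCEPTION_EXECUTE_HANDLER;
    // topLevelExceptionFilter instead returns EXCEPTION_CONTINUE_EXECUTION
    // (fault fixed up, resume) or EXCEPTION_CONTINUE_SEARCH (let the next
    // handler run).
  }
  return 0;
}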
2378 
2379 //-----------------------------------------------------------------------------
2380 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2381   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2382   PEXCEPTION_RECORD exception_record = exceptionInfo->ExceptionRecord;
2383   DWORD exception_code = exception_record->ExceptionCode;
2384 #ifdef _M_AMD64
2385   address pc = (address) exceptionInfo->ContextRecord->Rip;
2386 #else
2387   address pc = (address) exceptionInfo->ContextRecord->Eip;
2388 #endif
2389   Thread* t = Thread::current_or_null_safe();
2390 
2391   // Handle SafeFetch32 and SafeFetchN exceptions.
2392   if (StubRoutines::is_safefetch_fault(pc)) {
2393     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2394   }
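       // Note (explanatory, not from the original source): SafeFetch32/SafeFetchN
       // are small stubs that read a word from a possibly-unmapped address, e.g.
       // SafeFetch32(adr, errValue) returns *adr normally and errValue if the
       // load faults. When such a stub faults, the code above redirects the pc
       // to the stub's precomputed continuation, which produces errValue.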
2395 
2396 #ifndef _WIN64
2397   // Execution protection violation - win32 running on AMD64 only
2398   // Handled first to avoid misdiagnosis as a "normal" access violation.
2399   // This is safe to do because we have a new/unique ExceptionInformation
2400   // code for this condition.
2401   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2402     int exception_subcode = (int) exception_record->ExceptionInformation[0];
2403     address addr = (address) exception_record->ExceptionInformation[1];

2404 
2405     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2406       int page_size = os::vm_page_size();
2407 
2408       // Make sure the pc and the faulting address are sane.
2409       //
2410       // If an instruction spans a page boundary, and the page containing
2411       // the beginning of the instruction is executable but the following
2412       // page is not, the pc and the faulting address might be slightly
2413       // different - we still want to unguard the 2nd page in this case.
2414       //
2415       // 15 bytes seems to be a (very) safe value for max instruction size.
2416       bool pc_is_near_addr =
2417         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2418       bool instr_spans_page_boundary =
2419         (align_down((intptr_t) pc ^ (intptr_t) addr,
2420                          (intptr_t) page_size) > 0);
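           // Worked example (illustrative numbers, page_size = 0x1000):
           //   pc   = 0x7FFE0FFA   (instruction starts 6 bytes before a page end)
           //   addr = 0x7FFE1002   (fault reported on the following page)
           //   pointer_delta(addr, pc, 1) = 8 < 15          -> pc_is_near_addr
           //   align_down(pc ^ addr, 0x1000) = 0x1000 > 0   -> instr_spans_page_boundary
           // If pc and addr were on the same page, all bits above the page offset
           // would match, so the XOR would be below page_size and align_down
           // would yield 0.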
2421 
2422       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2423         static volatile address last_addr =

2447           //
2448           // The other race involves two threads alternately trapping at
2449           // different addresses and failing to unguard the page, resulting in
2450           // an endless loop.  This condition is probably even more unlikely
2451           // than the first.
2452           //
2453           // Although both cases could be avoided by using locks or thread
2454           // local last_addr, these solutions are unnecessary complication:
2455           // this handler is a best-effort safety net, not a complete solution.
2456           // It is disabled by default and should only be used as a workaround
2457           // in case we missed any no-execute-unsafe VM code.
2458 
2459           last_addr = addr;
2460 
2461           return EXCEPTION_CONTINUE_EXECUTION;
2462         }
2463       }
2464 
2465       // The last unguard failed, or we are not unguarding.
2466       tty->print_raw_cr("Execution protection violation");
2467       report_error(t, exception_code, addr, exception_record,
2468                    exceptionInfo->ContextRecord);
2469       return EXCEPTION_CONTINUE_SEARCH;
2470     }
2471   }
2472 #endif // _WIN64
2473 
2474   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2475       VM_Version::is_cpuinfo_segv_addr(pc)) {
2476     // Verify that the OS saves/restores AVX registers.
2477     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2478   }
2479 
2480   if (t != NULL && t->is_Java_thread()) {
2481     JavaThread* thread = (JavaThread*) t;
2482     bool in_java = thread->thread_state() == _thread_in_Java;
2483     bool in_native = thread->thread_state() == _thread_in_native;
2484     bool in_vm = thread->thread_state() == _thread_in_vm;
2485 
2486     // Handle potential stack overflows up front.
2487     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2488       if (thread->stack_guards_enabled()) {
2489         if (in_java) {
2490           frame fr;


2491           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2492             assert(fr.is_java_frame(), "Must be a Java frame");
2493             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2494           }
2495         }
2496         // Yellow zone violation.  The o/s has unprotected the first yellow
2497         // zone page for us.  Note:  must call disable_stack_yellow_reserved_zone to
2498         // update the enabled status, even if the zone contains only one page.
2499         assert(!in_vm, "Undersized StackShadowPages");
2500         thread->disable_stack_yellow_reserved_zone();
2501         // If not in Java code, return and hope for the best.
2502         return in_java
2503             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2504             :  EXCEPTION_CONTINUE_EXECUTION;
2505       } else {
2506         // Fatal red zone violation.
2507         thread->disable_stack_red_zone();
2508         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2509         report_error(t, exception_code, pc, exception_record,
2510                       exceptionInfo->ContextRecord);
2511         return EXCEPTION_CONTINUE_SEARCH;
2512       }
2513     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {

2514       if (in_java) {
2515         // Either stack overflow or null pointer exception.
2516         address addr = (address) exception_record->ExceptionInformation[1];
2517         address stack_end = thread->stack_end();
2518         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2519           // Stack overflow.
2520           assert(!os::uses_stack_guard_pages(),
2521                  "should be caught by red zone code above.");
2522           return Handle_Exception(exceptionInfo,
2523                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2524         }
2525         // Check for safepoint polling and implicit null checks.
2526         // We only expect null pointers in the stubs (vtable);
2527         // the rest are checked explicitly now.
2528         CodeBlob* cb = CodeCache::find_blob(pc);
2529         if (cb != NULL) {
2530           if (SafepointMechanism::is_poll_address(addr)) {
2531             address stub = SharedRuntime::get_poll_stub(pc);
2532             return Handle_Exception(exceptionInfo, stub);
2533           }
2534         }

2535 #ifdef _WIN64
2536         // If it's a legal stack address, commit the region from here up to the stack base.
2537         if (thread->is_in_usable_stack(addr)) {
2538           addr = (address)((uintptr_t)addr &
2539                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2540           os::commit_memory((char *)addr, thread->stack_base() - addr,
2541                             !ExecMem);
2542           return EXCEPTION_CONTINUE_EXECUTION;
2543         }
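             // Illustrative arithmetic for the masking above (page_size = 0x1000,
             // hypothetical address): 0x4A6B7C9D & ~0xFFF = 0x4A6B7000, i.e. addr
             // is rounded down to a page boundary; commit_memory then commits
             // every page in [addr, stack_base()).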



2544 #endif
2545         // Null pointer exception.
2546         if (MacroAssembler::uses_implicit_null_check((void*)addr)) {
2547           address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2548           if (stub != NULL) return Handle_Exception(exceptionInfo, stub);






2549         }
2550         report_error(t, exception_code, pc, exception_record,
2551                       exceptionInfo->ContextRecord);
2552         return EXCEPTION_CONTINUE_SEARCH;
2553       }
2554 
2555 #ifdef _WIN64
2556       // Special care for fast JNI field accessors.
2557       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2558       // in and the heap gets shrunk before the field access.
2559       address slowcase_pc = JNI_FastGetField::find_slowcase_pc(pc);
2560       if (slowcase_pc != (address)-1) {
2561         return Handle_Exception(exceptionInfo, slowcase_pc);


2562       }
2563 #endif
2564 
2565       // Stack overflow or null pointer exception in native code.
2566       report_error(t, exception_code, pc, exception_record,
2567                    exceptionInfo->ContextRecord);
2568       return EXCEPTION_CONTINUE_SEARCH;
2569     } // /EXCEPTION_ACCESS_VIOLATION
2570     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2571 
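         // Background (explanatory, not in the original comment):
         // EXCEPTION_IN_PAGE_ERROR is raised when the in-page I/O for a
         // memory-mapped view fails, e.g. a mapped ByteBuffer whose backing
         // file was truncated or whose device disappeared. If the fault
         // happened in code performing a known "unsafe" access (Unsafe,
         // mapped-memory copy stubs), execution is resumed at a fixup pc so
         // the VM can raise a Java-level error instead of crashing.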
2572     if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
2573       CompiledMethod* nm = NULL;
2574       JavaThread* thread = (JavaThread*)t;
2575       if (in_java) {
2576         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
2577         nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
2578       }
2579 
2580       bool is_unsafe_arraycopy = (in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
2581       if (((in_vm || in_native || is_unsafe_arraycopy) && thread->doing_unsafe_access()) ||



2582           (nm != NULL && nm->has_unsafe_access())) {
2583         address next_pc =  Assembler::locate_next_instruction(pc);
2584         if (is_unsafe_arraycopy) {
2585           next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
2586         }
2587         return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, next_pc));
2588       }
2589     }
2590 
2591     if (in_java) {
2592       switch (exception_code) {
2593       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2594         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2595 
2596       case EXCEPTION_INT_OVERFLOW:
2597         return Handle_IDiv_Exception(exceptionInfo);
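             // Context (explanatory): the classic source of EXCEPTION_INT_OVERFLOW
             // here is an idiv of min_jint by -1 (or min_jlong by -1), whose
             // quotient does not fit. Java defines that result as the dividend,
             // so the handler is expected to patch the result register and
             // resume after the divide instruction rather than throw.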
2598 
2599       } // switch
2600     }
2601     if ((in_java || in_native) && exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {


2602       LONG result = Handle_FLT_Exception(exceptionInfo);
2603       if (result == EXCEPTION_CONTINUE_EXECUTION) return result;
2604     }
2605   }
2606 
2607   if (exception_code != EXCEPTION_BREAKPOINT) {
2608     report_error(t, exception_code, pc, exception_record,
2609                  exceptionInfo->ContextRecord);
2610   }
2611   return EXCEPTION_CONTINUE_SEARCH;
2612 }
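
Handle_Exception is called throughout the filter above, but its body lies outside this hunk. As a rough, assumed sketch (not the actual implementation), a helper of this kind resumes the faulting thread at a chosen continuation address by rewriting the saved instruction pointer in the exception context:

// Sketch only: 'address' is HotSpot's byte-pointer typedef. The real
// Handle_Exception may do additional bookkeeping (e.g. recording the
// faulting pc) before resuming.
static LONG resume_at(struct _EXCEPTION_POINTERS* exceptionInfo, address handler) {
#if defined(_M_AMD64)
  exceptionInfo->ContextRecord->Rip = (DWORD64) handler;
#else
  exceptionInfo->ContextRecord->Eip = (DWORD) handler;
#endif
  return EXCEPTION_CONTINUE_EXECUTION;
}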
2613 
2614 #ifndef _WIN64
2615 // Special care for fast JNI accessors.
2616 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2617 // the heap gets shrunk before the field access.
2618 // Need to install our own structured exception handler since native code may
2619 // install its own.
2620 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2621   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2622   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2623     address pc = (address) exceptionInfo->ContextRecord->Eip;
2624     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2625     if (addr != (address)-1) {
2626       return Handle_Exception(exceptionInfo, addr);
2627     }
2628   }