< prev index next >

src/cpu/x86/vm/nativeInst_x86.hpp

Print this page
rev 8802 : G1 performance improvements: card batching, joining, sorting, prefetching and write barrier fence elision and simplification based on a global synchronization using handshakes piggybacking on thread-local safepoints.

@@ -524,11 +524,15 @@
 // Returns true if the instruction at offset 0 is an unconditional jump:
 // either the long form (opcode == NativeJump::instruction_code) or the
 // one-byte short-displacement form (opcode 0xEB).
 inline bool NativeInstruction::is_jump()         { return ubyte_at(0) == NativeJump::instruction_code ||
                                                           ubyte_at(0) == 0xEB; /* short jump */ }
 // Returns true if the instruction at offset 0 is a conditional jump:
 // the two-byte long form 0x0F 0x8x (first two bytes read little-endian as an
 // int, so the mask 0xF0FF against 0x800F checks byte0 == 0x0F and the high
 // nibble of byte1 == 0x8), or the one-byte short form 0x7x.
 inline bool NativeInstruction::is_cond_jump()    { return (int_at(0) & 0xF0FF) == 0x800F /* long jump */ ||
                                                           (ubyte_at(0) & 0xF0) == 0x70;  /* short jump */ }
 inline bool NativeInstruction::is_safepoint_poll() {
+  // TODO: Fix up parsing of safepoint poll code. Skipping now as it doesn't seem to be used for much other than asserts.
 #ifdef AMD64
+  if (ThreadLocalSafepoints) {
+    return true;
+  }
   if (Assembler::is_polling_page_far()) {
     // two cases, depending on the choice of the base register in the address.
     if (((ubyte_at(0) & NativeTstRegMem::instruction_rex_prefix_mask) == NativeTstRegMem::instruction_rex_prefix &&
          ubyte_at(1) == NativeTstRegMem::instruction_code_memXregl &&
          (ubyte_at(2) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg) ||
< prev index next >