src/hotspot/cpu/x86/nativeInst_x86.cpp

 185 void NativeCall::insert(address code_pos, address entry) {
 186   intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
 187 #ifdef AMD64
 188   guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
 189 #endif // AMD64
 190   *code_pos = instruction_code;
 191   *((int32_t *)(code_pos+1)) = (int32_t) disp;
 192   ICache::invalidate_range(code_pos, instruction_size);
 193 }
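The displacement math above relies on the x86 CALL rel32 encoding: a single 0xE8 opcode byte followed by a 4-byte displacement measured from the end of the 5-byte instruction (code_pos + 1 + 4). A minimal standalone sketch of that arithmetic, with a made-up helper name used purely for illustration:

  #include <cassert>
  #include <cstdint>

  // Illustrative only: compute the rel32 field for a CALL at code_pos to entry.
  int32_t call_rel32(uintptr_t code_pos, uintptr_t entry) {
    intptr_t disp = (intptr_t)entry - (intptr_t)(code_pos + 1 + 4);
    assert(disp == (intptr_t)(int32_t)disp && "target out of rel32 range");
    return (int32_t)disp;
  }

  // Example: a call emitted at 0x1000 targeting 0x2000 gets disp = 0xFFB,
  // so the bytes laid down are E8 FB 0F 00 00 (little-endian displacement).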
 194 
 195 // MT-safe patching of a call instruction.
 196 // First patches the first word of the instruction with two jmps that jump to
 197 // themselves (spinlock). Then patches the last byte, and then atomically
 198 // replaces the jmps with the first 4 bytes of the new instruction.
 199 void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
 200   assert(Patching_lock->is_locked() ||
 201          SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
 202   assert (instr_addr != NULL, "illegal address for code patching");
 203 
 204   NativeCall* n_call =  nativeCall_at (instr_addr); // checking that it is a call
 205   if (os::is_MP()) {
 206     guarantee((intptr_t)instr_addr % BytesPerWord == 0, "must be aligned");
 207   }
 208 
 209   // First patch dummy jmp in place
 210   unsigned char patch[4];
 211   assert(sizeof(patch)==sizeof(jint), "sanity check");
 212   patch[0] = 0xEB;       // jmp rel8
 213   patch[1] = 0xFE;       // jmp to self
 214   patch[2] = 0xEB;
 215   patch[3] = 0xFE;
 216 
 217   // First patch dummy jmp in place
 218   *(jint*)instr_addr = *(jint *)patch;
 219 
 220   // Invalidate.  Opteron requires a flush after every write.
 221   n_call->wrote(0);
 222 
 223   // Patch 4th byte
 224   instr_addr[4] = code_buffer[4];
 225 
 226   n_call->wrote(4);
 227 
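For context on the 0xEB 0xFE bytes used above: 0xEB is JMP rel8, and the 8-bit displacement is taken relative to the end of the 2-byte instruction, so a displacement of -2 (0xFE) sends execution back to the jump's own first byte. A thread that reaches the patched word therefore spins in place until the real call bytes are stored back. A tiny sketch of that arithmetic (helper name is illustrative only):

  #include <cstdint>

  // Target address of a 2-byte short jump located at jmp_addr.
  uintptr_t jmp_rel8_target(uintptr_t jmp_addr, int8_t rel8) {
    return jmp_addr + 2 + (intptr_t)rel8;   // next-instruction address + displacement
  }

  // jmp_rel8_target(addr, -2) == addr, i.e. an infinite self-loop; two copies
  // of EB FE fill the 4-byte word that is stored atomically over the call.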


 245 // Similar to replace_mt_safe, but just changes the destination.  The
 246 // important thing is that free-running threads are able to execute this
 247 // call instruction at all times.  If the displacement field is aligned
 248 // we can simply rely on atomicity of 32-bit writes to make sure other threads
 249 // will see no intermediate states.  Otherwise, the first two bytes of the
 250 // call are guaranteed to be aligned, and can be atomically patched to a
 251 // self-loop to guard the instruction while we change the other bytes.
 252 
 253 // We cannot rely on locks here, since the free-running threads must run at
 254 // full speed.
 255 //
 256 // Used in the runtime linkage of calls; see class CompiledIC.
 257 // (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)
 258 void NativeCall::set_destination_mt_safe(address dest) {
 259   debug_only(verify());
 260   // Make sure patching code is locked.  No two threads can patch at the same
 261   // time but one may be executing this code.
 262   assert(Patching_lock->is_locked() ||
 263          SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
 264   // Both C1 and C2 should now be generating code which aligns the patched address
 265   // to be within a single cache line except that C1 does not do the alignment on
 266   // uniprocessor systems.
 267   bool is_aligned = ((uintptr_t)displacement_address() + 0) / cache_line_size ==
 268                     ((uintptr_t)displacement_address() + 3) / cache_line_size;
 269 
 270   guarantee(!os::is_MP() || is_aligned, "destination must be aligned");
 271 
 272   if (is_aligned) {
 273     // Simple case:  The destination lies within a single cache line.
 274     set_destination(dest);
 275   } else if ((uintptr_t)instruction_address() / cache_line_size ==
 276              ((uintptr_t)instruction_address()+1) / cache_line_size) {
 277     // Tricky case:  The instruction prefix lies within a single cache line.
 278     intptr_t disp = dest - return_address();
 279 #ifdef AMD64
 280     guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
 281 #endif // AMD64
 282 
 283     int call_opcode = instruction_address()[0];
 284 
 285     // First patch dummy jump in place:
 286     {
 287       u_char patch_jump[2];
 288       patch_jump[0] = 0xEB;       // jmp rel8
 289       patch_jump[1] = 0xFE;       // jmp to self
 290 
 291       assert(sizeof(patch_jump)==sizeof(short), "sanity check");
 292       *(short*)instruction_address() = *(short*)patch_jump;
 293     }
 294     // Invalidate.  Opteron requires a flush after every write.
 295     wrote(0);
 296 
 297     // (Note: We assume any reader which has already started to read
 298     // the unpatched call will completely read the whole unpatched call
 299     // without seeing the next writes we are about to make.)
 300 
 301     // Next, patch the last three bytes:
 302     u_char patch_disp[5];
 303     patch_disp[0] = call_opcode;
 304     *(int32_t*)&patch_disp[1] = (int32_t)disp;
 305     assert(sizeof(patch_disp)==instruction_size, "sanity check");
 306     for (int i = sizeof(short); i < instruction_size; i++)
 307       instruction_address()[i] = patch_disp[i];
 308 
 309     // Invalidate.  Opteron requires a flush after every write.
 310     wrote(sizeof(short));
 311 
 312     // (Note: We assume that any reader which reads the opcode we are
 313     // about to repatch will also read the writes we just made.)
 314 
 315     // Finally, overwrite the jump:
 316     *(short*)instruction_address() = *(short*)patch_disp;
 317     // Invalidate.  Opteron requires a flush after every write.
 318     wrote(0);
 319 
 320     debug_only(verify());
 321     guarantee(destination() == dest, "patch succeeded");
 322   } else {
 323     // Impossible:  One or the other must be atomically writable.
 324     ShouldNotReachHere();
 325   }
 326 }
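The is_aligned test above just asks whether the four displacement bytes fall inside one cache line, so the 32-bit store done by set_destination cannot be observed half-written. A small standalone sketch of the predicate (the 64-byte line size is an assumption for the example; the code uses its own cache_line_size constant):

  #include <cassert>
  #include <cstdint>

  const uintptr_t line_size = 64;   // assumed for illustration

  bool displacement_fits_one_line(uintptr_t disp_addr) {
    bool by_division = (disp_addr + 0) / line_size == (disp_addr + 3) / line_size;
    // Equivalent formulation: the offset within the line leaves room for 4 bytes.
    assert(by_division == (disp_addr % line_size <= line_size - 4));
    return by_division;
  }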
 327 
 328 
 329 void NativeMovConstReg::verify() {
 330 #ifdef AMD64
 331   // make sure code pattern is actually a mov reg64, imm64 instruction
 332   if ((ubyte_at(0) != Assembler::REX_W && ubyte_at(0) != Assembler::REX_WB) ||
 333       (ubyte_at(1) & (0xff ^ register_mask)) != 0xB8) {
 334     print();
 335     fatal("not a REX.W[B] mov reg64, imm64");
 336   }
 337 #else
 338   // make sure code pattern is actually a mov reg, imm32 instruction
 339   u_char test_byte = *(u_char*)instruction_address();
 340   u_char test_byte_2 = test_byte & ( 0xff ^ register_mask);
 341   if (test_byte_2 != instruction_code) fatal("not a mov reg, imm32");
 342 #endif // AMD64
 343 }
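For reference, the byte patterns verify() accepts (assuming register_mask covers the low three opcode bits that select the register): on AMD64 the instruction is a REX.W (0x48) or REX.WB (0x49) prefix, the 0xB8+reg opcode, and an 8-byte immediate; on 32-bit it is just 0xB8+reg followed by a 4-byte immediate. A minimal sketch of the 64-bit check, again with an illustrative helper name:

  #include <cstdint>

  // Does p point at a "mov reg64, imm64" of the form REX.W/REX.WB + (B8+reg)?
  bool looks_like_mov_reg64_imm64(const uint8_t* p) {
    bool rex_ok    = (p[0] == 0x48) || (p[0] == 0x49);   // REX.W or REX.WB
    bool opcode_ok = (p[1] & ~0x07) == 0xB8;             // B8+reg, reg in low 3 bits
    return rex_ok && opcode_ok;
  }

  // Example encodings: 48 B8 <imm64> is mov rax, imm64; 49 BF <imm64> is
  // mov r15, imm64 (REX.B extends the register field to r8..r15).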
 344 
 345 




 185 void NativeCall::insert(address code_pos, address entry) {
 186   intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
 187 #ifdef AMD64
 188   guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
 189 #endif // AMD64
 190   *code_pos = instruction_code;
 191   *((int32_t *)(code_pos+1)) = (int32_t) disp;
 192   ICache::invalidate_range(code_pos, instruction_size);
 193 }
 194 
 195 // MT-safe patching of a call instruction.
 196 // First patches the first word of the instruction with two jmps that jump to
 197 // themselves (spinlock). Then patches the last byte, and then atomically
 198 // replaces the jmps with the first 4 bytes of the new instruction.
 199 void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
 200   assert(Patching_lock->is_locked() ||
 201          SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
 202   assert (instr_addr != NULL, "illegal address for code patching");
 203 
 204   NativeCall* n_call =  nativeCall_at (instr_addr); // checking that it is a call

 205   guarantee((intptr_t)instr_addr % BytesPerWord == 0, "must be aligned");

 206 
 207   // First patch dummy jmp in place
 208   unsigned char patch[4];
 209   assert(sizeof(patch)==sizeof(jint), "sanity check");
 210   patch[0] = 0xEB;       // jmp rel8
 211   patch[1] = 0xFE;       // jmp to self
 212   patch[2] = 0xEB;
 213   patch[3] = 0xFE;
 214 
 215   // First patch dummy jmp in place
 216   *(jint*)instr_addr = *(jint *)patch;
 217 
 218   // Invalidate.  Opteron requires a flush after every write.
 219   n_call->wrote(0);
 220 
 221   // Patch 4th byte
 222   instr_addr[4] = code_buffer[4];
 223 
 224   n_call->wrote(4);
 225 


 243 // Similar to replace_mt_safe, but just changes the destination.  The
 244 // important thing is that free-running threads are able to execute this
 245 // call instruction at all times.  If the displacement field is aligned
 246 // we can simply rely on atomicity of 32-bit writes to make sure other threads
 247 // will see no intermediate states.  Otherwise, the first two bytes of the
 248 // call are guaranteed to be aligned, and can be atomically patched to a
 249 // self-loop to guard the instruction while we change the other bytes.
 250 
 251 // We cannot rely on locks here, since the free-running threads must run at
 252 // full speed.
 253 //
 254 // Used in the runtime linkage of calls; see class CompiledIC.
 255 // (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)
 256 void NativeCall::set_destination_mt_safe(address dest) {
 257   debug_only(verify());
 258   // Make sure patching code is locked.  No two threads can patch at the same
 259   // time but one may be executing this code.
 260   assert(Patching_lock->is_locked() ||
 261          SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
 262   // Both C1 and C2 should now be generating code which aligns the patched address
 263   // to be within a single cache line.

 264   bool is_aligned = ((uintptr_t)displacement_address() + 0) / cache_line_size ==
 265                     ((uintptr_t)displacement_address() + 3) / cache_line_size;
 266 
 267   guarantee(is_aligned, "destination must be aligned");
 268 
 269   // The destination lies within a single cache line.

 270   set_destination(dest);
 271 }
 272 
 273 
 274 void NativeMovConstReg::verify() {
 275 #ifdef AMD64
 276   // make sure code pattern is actually a mov reg64, imm64 instruction
 277   if ((ubyte_at(0) != Assembler::REX_W && ubyte_at(0) != Assembler::REX_WB) ||
 278       (ubyte_at(1) & (0xff ^ register_mask)) != 0xB8) {
 279     print();
 280     fatal("not a REX.W[B] mov reg64, imm64");
 281   }
 282 #else
 283   // make sure code pattern is actually a mov reg, imm32 instruction
 284   u_char test_byte = *(u_char*)instruction_address();
 285   u_char test_byte_2 = test_byte & ( 0xff ^ register_mask);
 286   if (test_byte_2 != instruction_code) fatal("not a mov reg, imm32");
 287 #endif // AMD64
 288 }
 289 
 290 

