src/cpu/x86/vm/c1_CodeStubs_x86.cpp

 211   _info = new CodeEmitInfo(info);
 212 }
 213 
 214 
 215 void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
 216   assert(__ rsp_offset() == 0, "frame size should be fixed");
 217   __ bind(_entry);
 218   assert(_length->as_register() == rbx, "length must be in rbx");
 219   assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
 220   __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
 221   ce->add_call_info_here(_info);
 222   ce->verify_oop_map(_info);
 223   assert(_result->as_register() == rax, "result must be in rax");
 224   __ jmp(_continuation);
 225 }
 226 
 227 
 228 // Implementation of MonitorAccessStubs
 229 
 230 MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
 231 : MonitorAccessStub(obj_reg, lock_reg)
 232 {
 233   _info = new CodeEmitInfo(info);
 234 }
 235 
 236 
 237 void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
 238   assert(__ rsp_offset() == 0, "frame size should be fixed");
 239   __ bind(_entry);
 240   ce->store_parameter(_obj_reg->as_register(),  1);
 241   ce->store_parameter(_lock_reg->as_register(), 0);
 242   Runtime1::StubID enter_id;
 243   if (ce->compilation()->has_fpu_code()) {
 244     enter_id = Runtime1::monitorenter_id;
 245   } else {
 246     enter_id = Runtime1::monitorenter_nofpu_id;
 247   }
 248   __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
 249   ce->add_call_info_here(_info);
 250   ce->verify_oop_map(_info);
 251   __ jmp(_continuation);
 252 }
 253 
 254 
 255 void MonitorExitStub::emit_code(LIR_Assembler* ce) {
 256   __ bind(_entry);
 257   if (_compute_lock) {
 258     // lock_reg was destroyed by fast unlocking attempt => recompute it
 259     ce->monitor_address(_monitor_ix, _lock_reg);
 260   }
 261   ce->store_parameter(_lock_reg->as_register(), 0);
 262   // note: non-blocking leaf routine => no safepoint call info needed
 263   Runtime1::StubID exit_id;
 264   if (ce->compilation()->has_fpu_code()) {
 265     exit_id = Runtime1::monitorexit_id;
 266   } else {
 267     exit_id = Runtime1::monitorexit_nofpu_id;
 268   }
 269   __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
 270   __ jmp(_continuation);
 271 }
 272 
 273 
 274 // Implementation of patching:
 275 // - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
 276 // - Replace the original code with a call to the stub
 277 // At runtime:
 278 // - call to stub, jump to runtime
 279 // - in runtime: preserve all registers (especially objects, i.e., source and destination objects)
 280 // - in runtime: after initializing class, restore original code, reexecute instruction
 281 
 282 int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;
 283 
 284 void PatchingStub::align_patch_site(MacroAssembler* masm) {
 285   // We're patching a 5-7 byte instruction on Intel and we need to
 286   // make sure that we don't see a piece of the instruction.  It
 287   // appears mostly impossible on Intel to simply invalidate other
 288   // processors' caches, and since they may do aggressive prefetch it's
 289   // very hard to make a guess about what code might be in the icache.

src/cpu/x86/vm/c1_CodeStubs_x86.cpp

 211   _info = new CodeEmitInfo(info);
 212 }
 213 
 214 
 215 void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
 216   assert(__ rsp_offset() == 0, "frame size should be fixed");
 217   __ bind(_entry);
 218   assert(_length->as_register() == rbx, "length must be in rbx");
 219   assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
 220   __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
 221   ce->add_call_info_here(_info);
 222   ce->verify_oop_map(_info);
 223   assert(_result->as_register() == rax, "result must be in rax");
 224   __ jmp(_continuation);
 225 }
 226 
 227 
 228 // Implementation of MonitorAccessStubs
 229 
 230 MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
 231 : MonitorAccessStub(obj_reg, lock_reg, info)
 232 {
 233 }
 234 
 235 
 236 void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
 237   assert(__ rsp_offset() == 0, "frame size should be fixed");
 238   __ bind(_entry);
 239   ce->store_parameter(_obj_reg->as_register(),  1);
 240   ce->store_parameter(_lock_reg->as_register(), 0);
 241   Runtime1::StubID enter_id;
 242   if (ce->compilation()->has_fpu_code()) {
 243     enter_id = Runtime1::monitorenter_id;
 244   } else {
 245     enter_id = Runtime1::monitorenter_nofpu_id;
 246   }
 247   __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
 248   ce->add_call_info_here(_info);
 249   ce->verify_oop_map(_info);
 250   __ jmp(_continuation);
 251 }
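The *_nofpu_id stub variants exist because a compiled method with no FPU code never needs its FPU state saved and restored across the runtime call, so C1 can pick a cheaper stub. A minimal sketch of the same selection as a helper; the helper itself is hypothetical (not part of Runtime1) and assumes the surrounding file's declarations:

    // Hypothetical helper, not HotSpot code: mirrors the if/else above.
    // Without FPU code in the method, the stub that skips saving FPU
    // registers is sufficient.
    static Runtime1::StubID monitorenter_stub_id(bool has_fpu_code) {
      return has_fpu_code ? Runtime1::monitorenter_id
                          : Runtime1::monitorenter_nofpu_id;
    }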
 252 
 253 
 254 void MonitorExitStub::emit_code(LIR_Assembler* ce) {
 255   __ bind(_entry);
 256   if (_compute_lock) {
 257     // lock_reg was destroyed by fast unlocking attempt => recompute it
 258     ce->monitor_address(_monitor_ix, _lock_reg);
 259   }
 260   ce->store_parameter(_lock_reg->as_register(), 0);
 261   // note: non-blocking leaf routine => no safepoint call info needed
 262   Runtime1::StubID exit_id;
 263   if (ce->compilation()->has_fpu_code()) {
 264     exit_id = Runtime1::monitorexit_id;
 265   } else {
 266     exit_id = Runtime1::monitorexit_nofpu_id;
 267   }
 268   __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
 269   if (_info != NULL) {
 270     ce->add_non_safepoint_debug_info_here(_info);
 271   }
 272   __ jmp(_continuation);
 273 }
 274 
 275 
 276 // Implementation of patching:
 277 // - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
 278 // - Replace the original code with a call to the stub
 279 // At runtime:
 280 // - call to stub, jump to runtime
 281 // - in runtime: preserve all registers (especially objects, i.e., source and destination objects)
 282 // - in runtime: after initializing class, restore original code, reexecute instruction
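A standalone sketch of the "first the bytes, then the number of bytes" buffer layout described above; both helpers are hypothetical illustrations, not the actual PatchingStub code:

    #include <cstdint>
    #include <cstring>

    // Save the original instruction bytes into an inline buffer: the
    // bytes come first, followed by a one-byte count, so the runtime
    // can later find and restore them.
    static void save_patch_site(const uint8_t* patch_site, uint8_t* buffer,
                                uint8_t byte_count) {
      std::memcpy(buffer, patch_site, byte_count);  // first the bytes
      buffer[byte_count] = byte_count;              // then the number of bytes
    }

    // Restore the original code: the count byte sits right after the
    // saved bytes, so the bytes start at buffer_end - count.
    static void restore_patch_site(uint8_t* patch_site,
                                   const uint8_t* buffer_end) {
      uint8_t byte_count = *buffer_end;
      std::memcpy(patch_site, buffer_end - byte_count, byte_count);
    }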
 283 
 284 int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;
 285 
 286 void PatchingStub::align_patch_site(MacroAssembler* masm) {
 287   // We're patching a 5-7 byte instruction on Intel and we need to
 288   // make sure that we don't see a piece of the instruction.  It
 289   // appears mostly impossible on Intel to simply invalidate other
 290   // processors' caches, and since they may do aggressive prefetch it's
 291   // very hard to make a guess about what code might be in the icache.
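As a standalone illustration of the concern this comment raises, here is a minimal sketch, using hypothetical helpers rather than the HotSpot MacroAssembler API, of padding a patch site so a 5-7 byte instruction cannot straddle an alignment boundary:

    #include <cstddef>
    #include <cstdint>

    // Round pc up to the next multiple of alignment (alignment must be
    // a power of two).
    static uintptr_t align_up(uintptr_t pc, uintptr_t alignment) {
      return (pc + alignment - 1) & ~(alignment - 1);
    }

    // True if an instruction of insn_size bytes starting at pc crosses
    // an alignment boundary.
    static bool crosses_boundary(uintptr_t pc, size_t insn_size,
                                 uintptr_t alignment) {
      return (pc / alignment) != ((pc + insn_size - 1) / alignment);
    }

    // Number of single-byte nops to emit before the patch site so the
    // instruction lands entirely within one aligned block; assumes
    // insn_size <= alignment.
    static size_t patch_site_padding(uintptr_t pc, size_t insn_size,
                                     uintptr_t alignment) {
      return crosses_boundary(pc, insn_size, alignment)
                 ? align_up(pc, alignment) - pc
                 : 0;
    }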