287 address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? Runtime1::monitorexit_id : Runtime1::monitorexit_nofpu_id);
288 //__ load_const_optimized(R0, stub);
289 __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
290 assert(_lock_reg->as_register() == R4_ARG2, "");
291 __ mtctr(R0);
292 __ bctrl();
293 __ b(_continuation);
294 }
295
296
297 // Implementation of patching:
298 // - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes).
299 // - Replace original code with a call to the stub.
300 // At Runtime:
301 // - call to stub, jump to runtime
302 // - in runtime: preserve all registers (especially objects, i.e., source and destination object)
303 // - in runtime: after initializing class, restore original code, reexecute instruction
304
// Byte offset from the patch site back to the patching info; negative because
// the info is emitted ahead of the site. The magnitude is 5 instruction words,
// which presumably matches the longest constant-load sequence emitted before
// the patch site on ppc -- NOTE(review): confirm against emit_code below.
int PatchingStub::_patch_info_offset = -(5 * BytesPerInstWord);
306
// Intentionally a no-op on ppc: every instruction is one 4-byte word, so a
// patch site can never straddle an alignment boundary and needs no padding.
void PatchingStub::align_patch_site(MacroAssembler* ) {
// Patch sites on ppc are always properly aligned.
}
310
#ifdef ASSERT
// Debug-only sanity check: the code currently at pc_start must be
// byte-for-byte identical to the saved template for this patch site.
inline void compare_with_patch_site(address template_start, address pc_start, int bytes_to_copy) {
  const address expected = template_start;
  for (int idx = 0; idx < bytes_to_copy; idx++) {
    const int found_byte = pc_start[idx] & 0xFF;
    assert(found_byte == expected[idx], "should be the same code");
  }
}
#endif
321
322 void PatchingStub::emit_code(LIR_Assembler* ce) {
323 // copy original code here
324 assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
325 "not enough room for call");
326 assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");
|
287 address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? Runtime1::monitorexit_id : Runtime1::monitorexit_nofpu_id);
288 //__ load_const_optimized(R0, stub);
289 __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
290 assert(_lock_reg->as_register() == R4_ARG2, "");
291 __ mtctr(R0);
292 __ bctrl();
293 __ b(_continuation);
294 }
295
296
297 // Implementation of patching:
298 // - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes).
299 // - Replace original code with a call to the stub.
300 // At Runtime:
301 // - call to stub, jump to runtime
302 // - in runtime: preserve all registers (especially objects, i.e., source and destination object)
303 // - in runtime: after initializing class, restore original code, reexecute instruction
304
// Byte offset from the patch site back to the patching info; negative because
// the info is emitted ahead of the site. The magnitude is 5 instruction words,
// which presumably matches the longest constant-load sequence emitted before
// the patch site on ppc -- NOTE(review): confirm against emit_code below.
int PatchingStub::_patch_info_offset = -(5 * BytesPerInstWord);
306
307 int PatchingStub::patch_info_offset() { return _patch_info_offset; }
308
// Intentionally a no-op on ppc: every instruction is one 4-byte word, so a
// patch site can never straddle an alignment boundary and needs no padding.
void PatchingStub::align_patch_site(MacroAssembler* ) {
// Patch sites on ppc are always properly aligned.
}
312
#ifdef ASSERT
// Debug-only sanity check: the code currently at pc_start must be
// byte-for-byte identical to the saved template for this patch site.
inline void compare_with_patch_site(address template_start, address pc_start, int bytes_to_copy) {
  const address expected = template_start;
  for (int idx = 0; idx < bytes_to_copy; idx++) {
    const int found_byte = pc_start[idx] & 0xFF;
    assert(found_byte == expected[idx], "should be the same code");
  }
}
#endif
323
324 void PatchingStub::emit_code(LIR_Assembler* ce) {
325 // copy original code here
326 assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
327 "not enough room for call");
328 assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");
|