src/cpu/x86/vm/c1_CodeStubs_x86.cpp

rev 6355: 8031475: Missing oopmap in patching stub
Reviewed-by:

Old:
 395   assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
 396 
 397   address entry = __ pc();
 398   NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
 399   address target = NULL;
 400   relocInfo::relocType reloc_type = relocInfo::none;
 401   switch (_id) {
 402     case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
 403     case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
 404     case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
 405     case load_appendix_id:      target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
 406     default: ShouldNotReachHere();
 407   }
 408   __ bind(call_patch);
 409 
 410   if (CommentedAssembly) {
 411     __ block_comment("patch entry point");
 412   }
 413   __ call(RuntimeAddress(target));
 414   assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");

 415   ce->add_call_info_here(_info);
 416   int jmp_off = __ offset();
 417   __ jmp(_patch_site_entry);
 418   // Add enough nops so deoptimization can overwrite the jmp above with a call
 419   // and not destroy the world.
 420   for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
 421     __ nop();
 422   }
 423   if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
 424     CodeSection* cs = __ code_section();
 425     RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
 426     relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
 427   }
 428 }
 429 
 430 
 431 void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
 432   __ bind(_entry);
 433   __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
 434   ce->add_call_info_here(_info);

New:
 395   assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
 396 
 397   address entry = __ pc();
 398   NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
 399   address target = NULL;
 400   relocInfo::relocType reloc_type = relocInfo::none;
 401   switch (_id) {
 402     case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
 403     case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
 404     case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
 405     case load_appendix_id:      target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
 406     default: ShouldNotReachHere();
 407   }
 408   __ bind(call_patch);
 409 
 410   if (CommentedAssembly) {
 411     __ block_comment("patch entry point");
 412   }
 413   __ call(RuntimeAddress(target));
 414   assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
 415   _info->oop_map()->set_oop(_obj->as_VMReg());
 416   ce->add_call_info_here(_info);
 417   int jmp_off = __ offset();
 418   __ jmp(_patch_site_entry);
 419   // Add enough nops so deoptimization can overwrite the jmp above with a call
 420   // and not destroy the world.
 421   for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
 422     __ nop();
 423   }
 424   if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
 425     CodeSection* cs = __ code_section();
 426     RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
 427     relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
 428   }
 429 }
 430 
 431 
 432 void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
 433   __ bind(_entry);
 434   __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
 435   ce->add_call_info_here(_info);
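
The functional change in this webrev is the single line added at new line 415: the patching stub's oopmap now records the register holding _obj as a live oop across the call into the Runtime1 patching routine, so a GC that reaches the safepoint at this call can find and update the reference. The delta in context (new lines 413-416):

 413   __ call(RuntimeAddress(target));
 414   assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
 415   _info->oop_map()->set_oop(_obj->as_VMReg());   // added: record _obj in the oopmap
 416   ce->add_call_info_here(_info);

Without this entry, the debug info recorded by add_call_info_here carries an oopmap with no slot for _obj across the patching call, which is the missing oopmap the bug title refers to.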

