src/cpu/x86/vm/c1_CodeStubs_x86.cpp

  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world. We cannot use fat nops here, since the concurrent
  // code rewrite may transiently create an illegal instruction sequence.
  for (int j = __ offset(); j < jmp_off + 5; j++) {
    __ nop();
  }
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
  }
}
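
The nop loop above pads the patch site out to five bytes: on x86, call rel32 (opcode 0xE8 plus a 32-bit displacement) and jmp rel32 (0xE9) are both five bytes, so once the site is padded, deoptimization can overwrite the jmp with a call in place. Below is a minimal standalone sketch of that padding rule, assuming a plain byte buffer; pad_patch_site is a hypothetical helper for illustration, not HotSpot code.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical helper: pad an emitted jmp site with single-byte nops (0x90)
// until it covers the footprint of a call rel32. Single-byte nops keep every
// intermediate byte a complete instruction, which is why the code above
// avoids multi-byte "fat" nops that a concurrent rewrite could split.
static void pad_patch_site(std::vector<uint8_t>& code, std::size_t jmp_off) {
  const std::size_t kCallRel32Size = 5;  // 0xE8 + 4-byte displacement
  while (code.size() < jmp_off + kCallRel32Size) {
    code.push_back(0x90);                // nop
  }
}

int main() {
  std::vector<uint8_t> code = {0xEB, 0x10};  // short jmp rel8 is only 2 bytes
  pad_patch_site(code, 0);                   // the jmp was emitted at offset 0
  assert(code.size() == 5);                  // large enough for call rel32
  return 0;
}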
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // Pass the trap request to the deoptimization runtime stub.
  ce->store_parameter(_trap_request, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  // The runtime call deoptimizes the frame and does not return.
  DEBUG_ONLY(__ should_not_reach_here());
}
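
DeoptimizeStub hands _trap_request to the deoptimization runtime entry as its single parameter. In HotSpot a trap request packs a deoptimization reason together with a recompile action into one integer; the sketch below illustrates that idea with a hypothetical bit layout (the enum values, shifts, and helper names are assumptions, not HotSpot's actual encoding in deoptimization.hpp).

#include <cassert>

// Hypothetical packing: low 8 bits hold the reason, next 8 bits the action.
enum Reason { reason_null_check = 1, reason_class_check = 2 };
enum Action { action_none = 0, action_reinterpret = 1 };

static int make_trap_request(Reason r, Action a) {
  return (static_cast<int>(a) << 8) | static_cast<int>(r);
}

static Reason trap_request_reason(int req) {
  return static_cast<Reason>(req & 0xff);
}

static Action trap_request_action(int req) {
  return static_cast<Action>((req >> 8) & 0xff);
}

int main() {
  int req = make_trap_request(reason_class_check, action_reinterpret);
  assert(trap_request_reason(req) == reason_class_check);
  assert(trap_request_action(req) == action_reinterpret);
  return 0;
}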