src/cpu/x86/vm/c1_CodeStubs_x86.cpp
8004250 Sdiff

 296       assert(a_byte == *start++, "should be the same code");
 297     }
 298 #endif
 299   } else if (_id == load_mirror_id) {
 300     // produce a copy of the load mirror instruction for use by the being
 301     // initialized case
 302 #ifdef ASSERT
 303     address start = __ pc();
 304 #endif
 305     jobject o = NULL;
 306     __ movoop(_obj, o);
 307 #ifdef ASSERT
 308     for (int i = 0; i < _bytes_to_copy; i++) {
 309       address ptr = (address)(_pc_start + i);
 310       int a_byte = (*ptr) & 0xFF;
 311       assert(a_byte == *start++, "should be the same code");
 312     }
 313 #endif
 314   } else {
 315     // make a copy of the code which is going to be patched.
 316     for ( int i = 0; i < _bytes_to_copy; i++) {
 317       address ptr = (address)(_pc_start + i);
 318       int a_byte = (*ptr) & 0xFF;
 319       __ a_byte (a_byte);
 320       *ptr = 0x90; // make the site look like a nop
 321     }
 322   }
 323 
 324   address end_of_patch = __ pc();
 325   int bytes_to_skip = 0;
 326   if (_id == load_mirror_id) {
 327     int offset = __ offset();
 328     if (CommentedAssembly) {
 329       __ block_comment(" being_initialized check");
 330     }
 331     assert(_obj != noreg, "must be a valid register");
 332     Register tmp = rax;
 333     Register tmp2 = rbx;
 334     __ push(tmp);
 335     __ push(tmp2);
 336     // Load without verification to keep code size small. We need it because
 337     // being_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
 338     __ movptr(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
 339     __ get_thread(tmp);


 346     // copied back into place so we need to jump back into the main
 347     // code of the nmethod to continue execution.
 348     __ jmp(_patch_site_continuation);
 349 
 350     // make sure this extra code gets skipped
 351     bytes_to_skip += __ offset() - offset;
 352   }
 353   if (CommentedAssembly) {
 354     __ block_comment("patch data encoded as movl");
 355   }
 356   // Now emit the patch record telling the runtime how to find the
 357   // pieces of the patch.  We only need 3 bytes but for readability of
 358   // the disassembly we make the data look like a movl reg, imm32,
 359   // which requires 5 bytes
 360   int sizeof_patch_record = 5;
 361   bytes_to_skip += sizeof_patch_record;
 362 
 363   // emit the offsets needed to find the code to patch
 364   int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;
 365 
 366   __ a_byte(0xB8);
 367   __ a_byte(0);
 368   __ a_byte(being_initialized_entry_offset);
 369   __ a_byte(bytes_to_skip);
 370   __ a_byte(_bytes_to_copy);
 371   address patch_info_pc = __ pc();
 372   assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
 373 
 374   address entry = __ pc();
 375   NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
 376   address target = NULL;
 377   relocInfo::relocType reloc_type = relocInfo::none;
 378   switch (_id) {
 379     case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
 380     case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
 381     case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
 382     default: ShouldNotReachHere();
 383   }
 384   __ bind(call_patch);
 385 
 386   if (CommentedAssembly) {
 387     __ block_comment("patch entry point");
 388   }
 389   __ call(RuntimeAddress(target));
 390   assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
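
The patch record emitted above (its layout is unchanged in the new version that follows) is five bytes shaped like a movl eax, imm32: the 0xB8 opcode, a zero byte, and then the three payload bytes (being_initialized_entry_offset, bytes_to_skip, _bytes_to_copy). As a rough sketch of that layout only, here is a hypothetical reader for those five bytes; the PatchRecord struct and decode_patch_record function below are illustrative, not HotSpot API (the real runtime-side logic lives in Runtime1::patch_code).

#include <cstdint>

// Hypothetical sketch of the 5-byte patch record layout emitted above.
// It is written so a disassembler shows it as "movl eax, imm32":
//   byte 0: 0xB8 (mov eax, imm32 opcode)
//   byte 1: 0
//   byte 2: being_initialized_entry_offset
//   byte 3: bytes_to_skip
//   byte 4: bytes_to_copy
struct PatchRecord {
  uint8_t being_initialized_entry_offset;
  uint8_t bytes_to_skip;
  uint8_t bytes_to_copy;
};

// 'record_pc' points at the 0xB8 opcode byte. Returns false if the bytes
// do not look like the expected fake movl. (Illustrative only.)
bool decode_patch_record(const uint8_t* record_pc, PatchRecord* out) {
  if (record_pc[0] != 0xB8 || record_pc[1] != 0) return false;
  out->being_initialized_entry_offset = record_pc[2];
  out->bytes_to_skip                  = record_pc[3];
  out->bytes_to_copy                  = record_pc[4];
  return true;
}
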




 296       assert(a_byte == *start++, "should be the same code");
 297     }
 298 #endif
 299   } else if (_id == load_mirror_id) {
 300     // produce a copy of the load mirror instruction for use by the being
 301     // initialized case
 302 #ifdef ASSERT
 303     address start = __ pc();
 304 #endif
 305     jobject o = NULL;
 306     __ movoop(_obj, o);
 307 #ifdef ASSERT
 308     for (int i = 0; i < _bytes_to_copy; i++) {
 309       address ptr = (address)(_pc_start + i);
 310       int a_byte = (*ptr) & 0xFF;
 311       assert(a_byte == *start++, "should be the same code");
 312     }
 313 #endif
 314   } else {
 315     // make a copy of the code which is going to be patched.
 316     for (int i = 0; i < _bytes_to_copy; i++) {
 317       address ptr = (address)(_pc_start + i);
 318       int a_byte = (*ptr) & 0xFF;
 319       __ emit_int8(a_byte);
 320       *ptr = 0x90; // make the site look like a nop
 321     }
 322   }
 323 
 324   address end_of_patch = __ pc();
 325   int bytes_to_skip = 0;
 326   if (_id == load_mirror_id) {
 327     int offset = __ offset();
 328     if (CommentedAssembly) {
 329       __ block_comment(" being_initialized check");
 330     }
 331     assert(_obj != noreg, "must be a valid register");
 332     Register tmp = rax;
 333     Register tmp2 = rbx;
 334     __ push(tmp);
 335     __ push(tmp2);
 336     // Load without verification to keep code size small. We need it because
 337     // being_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
 338     __ movptr(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
 339     __ get_thread(tmp);


 346     // copied back into place so we need to jump back into the main
 347     // code of the nmethod to continue execution.
 348     __ jmp(_patch_site_continuation);
 349 
 350     // make sure this extra code gets skipped
 351     bytes_to_skip += __ offset() - offset;
 352   }
 353   if (CommentedAssembly) {
 354     __ block_comment("patch data encoded as movl");
 355   }
 356   // Now emit the patch record telling the runtime how to find the
 357   // pieces of the patch.  We only need 3 bytes but for readability of
 358   // the disassembly we make the data look like a movl reg, imm32,
 359   // which requires 5 bytes
 360   int sizeof_patch_record = 5;
 361   bytes_to_skip += sizeof_patch_record;
 362 
 363   // emit the offsets needed to find the code to patch
 364   int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;
 365 
 366   __ emit_int8((unsigned char)0xB8);
 367   __ emit_int8(0);
 368   __ emit_int8(being_initialized_entry_offset);
 369   __ emit_int8(bytes_to_skip);
 370   __ emit_int8(_bytes_to_copy);
 371   address patch_info_pc = __ pc();
 372   assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
 373 
 374   address entry = __ pc();
 375   NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
 376   address target = NULL;
 377   relocInfo::relocType reloc_type = relocInfo::none;
 378   switch (_id) {
 379     case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
 380     case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
 381     case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
 382     default: ShouldNotReachHere();
 383   }
 384   __ bind(call_patch);
 385 
 386   if (CommentedAssembly) {
 387     __ block_comment("patch entry point");
 388   }
 389   __ call(RuntimeAddress(target));
 390   assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
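
The substance of the change between the two versions is the switch from the old a_byte emitter to emit_int8, which takes a signed 8-bit value; hence the explicit (unsigned char)0xB8 cast on the opcode byte in the new code. A minimal stand-alone sketch of the same pattern, assuming a toy buffer class (ToyBuffer below is illustrative, not the HotSpot assembler):

#include <cstdint>
#include <vector>

// Toy stand-in for a code buffer with an emit_int8-style interface.
struct ToyBuffer {
  std::vector<uint8_t> bytes;
  void emit_int8(int8_t b) { bytes.push_back(static_cast<uint8_t>(b)); }
};

int main() {
  ToyBuffer buf;
  // 0xB8 (184) does not fit in int8_t, so passing the bare literal can draw
  // an implicit-conversion warning; the cast makes the narrowing explicit,
  // mirroring the "(unsigned char)0xB8" in the patched code above.
  buf.emit_int8((unsigned char)0xB8);  // movl eax, imm32 opcode
  buf.emit_int8(0);                    // first payload byte of the record
  return buf.bytes[0] == 0xB8 ? 0 : 1;
}
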


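In the non-mirror path of both versions, the else branch copies the _bytes_to_copy instruction bytes out of the patch site into the stub and rewrites the site with 0x90 nops; NativeGeneralJump::insert_unconditional then plants a jump to the stub over the site, and the runtime later copies the fixed-up bytes back. A rough stand-alone sketch of just that save-and-nop/restore idea (the helper names are made up for illustration, not HotSpot functions):

#include <cstdint>
#include <cstring>

// Illustrative helpers (not HotSpot code): the bytes at the patch site are
// saved off and the site is filled with single-byte nops (0x90); the real
// stub then plants a jump over them to the stub entry until patching is done.
void save_and_nop_site(uint8_t* pc_start, uint8_t* stub_copy, int bytes_to_copy) {
  std::memcpy(stub_copy, pc_start, bytes_to_copy);  // keep the original instruction bytes
  std::memset(pc_start, 0x90, bytes_to_copy);       // make the site look like nops
}

// Once the runtime has resolved the constant and fixed up the saved bytes,
// they are copied back so execution can flow through the original site again.
void restore_site(uint8_t* pc_start, const uint8_t* stub_copy, int bytes_to_copy) {
  std::memcpy(pc_start, stub_copy, bytes_to_copy);
}

int main() {
  uint8_t site[4]  = { 0x8B, 0x45, 0x08, 0x90 };  // pretend patch-site bytes
  uint8_t saved[4];
  save_and_nop_site(site, saved, 4);               // site is now all 0x90
  restore_site(site, saved, 4);                    // original bytes are back
  return site[0] == 0x8B ? 0 : 1;
}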