src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp
Sdiff for 8004250

--- old
 281   } else if (_id == load_mirror_id) {
 282     // produce a copy of the load mirror instruction for use by the being-initialized case
 283 #ifdef ASSERT
 284     address start = __ pc();
 285 #endif
 286     AddressLiteral addrlit(NULL, oop_Relocation::spec(_index));
 287     __ patchable_set(addrlit, _obj);
 288 
 289 #ifdef ASSERT
 290     for (int i = 0; i < _bytes_to_copy; i++) {
 291       address ptr = (address)(_pc_start + i);
 292       int a_byte = (*ptr) & 0xFF;
 293       assert(a_byte == *start++, "should be the same code");
 294     }
 295 #endif
 296   } else {
 297     // make a copy of the code which is going to be patched.
 298     for (int i = 0; i < _bytes_to_copy; i++) {
 299       address ptr = (address)(_pc_start + i);
 300       int a_byte = (*ptr) & 0xFF;
 301       __ a_byte (a_byte);
 302     }
 303   }
 304 
 305   address end_of_patch = __ pc();
 306   int bytes_to_skip = 0;
 307   if (_id == load_mirror_id) {
 308     int offset = __ offset();
 309     if (CommentedAssembly) {
 310       __ block_comment(" being_initialized check");
 311     }
 312 
 313     // static field accesses have special semantics while the class
 314     // initializer is being run so we emit a test which can be used to
 315     // check that this code is being executed by the initializing
 316     // thread.
 317     assert(_obj != noreg, "must be a valid register");
 318     assert(_index >= 0, "must have oop index");
 319     __ ld_ptr(_obj, java_lang_Class::klass_offset_in_bytes(), G3);
 320     __ ld_ptr(G3, in_bytes(InstanceKlass::init_thread_offset()), G3);
 321     __ cmp_and_brx_short(G2_thread, G3, Assembler::notEqual, Assembler::pn, call_patch);
 322
 323     // load_klass patches may execute the patched code before it's
 324     // copied back into place so we need to jump back into the main
 325     // code of the nmethod to continue execution.
 326     __ br(Assembler::always, false, Assembler::pt, _patch_site_continuation);
 327     __ delayed()->nop();
 328 
 329     // make sure this extra code gets skipped
 330     bytes_to_skip += __ offset() - offset;
 331   }
 332 
 333   // Now emit the patch record telling the runtime how to find the
 334   // pieces of the patch.  We only need 3 bytes but it has to be
 335   // aligned as an instruction so emit 4 bytes.
 336   int sizeof_patch_record = 4;
 337   bytes_to_skip += sizeof_patch_record;
 338 
 339   // emit the offsets needed to find the code to patch
 340   int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;
 341 
 342   // Emit the patch record.  We need to emit a full word, so emit an extra empty byte
 343   __ a_byte(0);
 344   __ a_byte(being_initialized_entry_offset);
 345   __ a_byte(bytes_to_skip);
 346   __ a_byte(_bytes_to_copy);
 347   address patch_info_pc = __ pc();
 348   assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
 349 
 350   address entry = __ pc();
 351   NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
 352   address target = NULL;
 353   relocInfo::relocType reloc_type = relocInfo::none;
 354   switch (_id) {
 355     case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
 356     case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
 357     case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
 358     default: ShouldNotReachHere();
 359   }
 360   __ bind(call_patch);
 361 
 362   if (CommentedAssembly) {
 363     __ block_comment("patch entry point");
 364   }
 365   __ call(target, relocInfo::runtime_call_type);
 366   __ delayed()->nop();

+++ new

 281   } else if (_id == load_mirror_id) {
 282     // produce a copy of the load mirror instruction for use by the being-initialized case
 283 #ifdef ASSERT
 284     address start = __ pc();
 285 #endif
 286     AddressLiteral addrlit(NULL, oop_Relocation::spec(_index));
 287     __ patchable_set(addrlit, _obj);
 288 
 289 #ifdef ASSERT
 290     for (int i = 0; i < _bytes_to_copy; i++) {
 291       address ptr = (address)(_pc_start + i);
 292       int a_byte = (*ptr) & 0xFF;
 293       assert(a_byte == *start++, "should be the same code");
 294     }
 295 #endif
 296   } else {
 297     // make a copy of the code which is going to be patched.
 298     for (int i = 0; i < _bytes_to_copy; i++) {
 299       address ptr = (address)(_pc_start + i);
 300       int a_byte = (*ptr) & 0xFF;
 301       __ emit_int8 (a_byte);
 302     }
 303   }
 304 
 305   address end_of_patch = __ pc();
 306   int bytes_to_skip = 0;
 307   if (_id == load_mirror_id) {
 308     int offset = __ offset();
 309     if (CommentedAssembly) {
 310       __ block_comment(" being_initialized check");
 311     }
 312 
 313     // static field accesses have special semantics while the class
 314     // initializer is being run so we emit a test which can be used to
 315     // check that this code is being executed by the initializing
 316     // thread.
 317     assert(_obj != noreg, "must be a valid register");
 318     assert(_index >= 0, "must have oop index");
 319     __ ld_ptr(_obj, java_lang_Class::klass_offset_in_bytes(), G3);
 320     __ ld_ptr(G3, in_bytes(InstanceKlass::init_thread_offset()), G3);
 321     __ cmp_and_brx_short(G2_thread, G3, Assembler::notEqual, Assembler::pn, call_patch);
 322
 323     // load_klass patches may execute the patched code before it's
 324     // copied back into place so we need to jump back into the main
 325     // code of the nmethod to continue execution.
 326     __ br(Assembler::always, false, Assembler::pt, _patch_site_continuation);
 327     __ delayed()->nop();
 328 
 329     // make sure this extra code gets skipped
 330     bytes_to_skip += __ offset() - offset;
 331   }
 332 
 333   // Now emit the patch record telling the runtime how to find the
 334   // pieces of the patch.  We only need 3 bytes but it has to be
 335   // aligned as an instruction so emit 4 bytes.
 336   int sizeof_patch_record = 4;
 337   bytes_to_skip += sizeof_patch_record;
 338 
 339   // emit the offsets needed to find the code to patch
 340   int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;
 341 
 342   // Emit the patch record.  We need to emit a full word, so emit an extra empty byte
 343   __ emit_int8(0);
 344   __ emit_int8(being_initialized_entry_offset);
 345   __ emit_int8(bytes_to_skip);
 346   __ emit_int8(_bytes_to_copy);
 347   address patch_info_pc = __ pc();
 348   assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
 349 
 350   address entry = __ pc();
 351   NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
 352   address target = NULL;
 353   relocInfo::relocType reloc_type = relocInfo::none;
 354   switch (_id) {
 355     case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
 356     case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
 357     case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
 358     default: ShouldNotReachHere();
 359   }
 360   __ bind(call_patch);
 361 
 362   if (CommentedAssembly) {
 363     __ block_comment("patch entry point");
 364   }
 365   __ call(target, relocInfo::runtime_call_type);
 366   __ delayed()->nop();
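
A note on the change itself: the only difference between the old and new versions is the rename of the assembler's a_byte() calls to emit_int8(), the subject of 8004250; the emitted bytes are unchanged. Conceptually, either spelling appends one byte to the code buffer. A toy sketch of that behavior, assuming nothing about HotSpot's actual AbstractAssembler:

    // Toy sketch only: "emit one byte" as a raw pointer bump. The real
    // assembler writes through a CodeBuffer with bounds checks and
    // relocation bookkeeping, none of which is modeled here.
    class ToyAssembler {
      unsigned char* _code_end;   // next free byte in a hypothetical buffer
     public:
      explicit ToyAssembler(unsigned char* end) : _code_end(end) {}
      void emit_int8(int x) { *_code_end++ = (unsigned char)(x & 0xFF); }
    };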

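The being_initialized test at source lines 313-327 is there because static field accesses from a class's own initializer must work while <clinit> is still running: only the initializing thread may fall through to the unpatched copy of the load. A rough C++ rendering of the control flow the SPARC sequence encodes; mirror_klass(), init_thread(), and the two branch helpers are hypothetical stand-ins for the emitted instructions:

    // Sketch of the emitted test, not code that runs in the VM.
    void* mirror_klass(void* mirror); // ld_ptr(_obj, java_lang_Class::klass_offset_in_bytes(), G3)
    void* init_thread(void* klass);   // ld_ptr(G3, in_bytes(InstanceKlass::init_thread_offset()), G3)
    void  call_patch_stub();          // branch target: call_patch
    void  continue_in_nmethod();      // branch target: _patch_site_continuation

    void being_initialized_check(void* mirror, void* current_thread) {
      void* k = mirror_klass(mirror);
      if (init_thread(k) != current_thread) {
        call_patch_stub();      // cmp_and_brx_short(..., notEqual, pn, call_patch)
      } else {
        // Initializing thread: execute the copied, still-unpatched load
        // emitted above, then jump back into the nmethod.
        continue_in_nmethod();  // br(always, ..., _patch_site_continuation)
      }
    }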

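The patch record emitted at source lines 336-348 is three payload bytes plus one pad byte, rounded up so the record stays instruction-aligned. A hedged sketch of how those four bytes could be read back relative to patch_info_pc; the struct and function names here are illustrative, and the real consumer is the Runtime1 patching code, not this sketch:

    // Layout of the four bytes emitted above, read back from the address
    // just past the record (patch_info_pc). Names are hypothetical; only
    // the byte order matches the emission sequence.
    struct PatchRecord {
      unsigned char pad;                            // emit_int8(0), alignment filler
      unsigned char being_initialized_entry_offset; // distance back to the being_initialized entry
      unsigned char bytes_to_skip;                  // end_of_patch through the end of the record
      unsigned char bytes_to_copy;                  // length of the code copied over _pc_start
    };

    inline PatchRecord read_patch_record(const unsigned char* patch_info_pc) {
      const unsigned char* p = patch_info_pc - 4;   // the record is the last 4 bytes
      return PatchRecord{ p[0], p[1], p[2], p[3] };
    }

The assert at source line 348 pins this layout down: patch_info_pc minus end_of_patch must equal bytes_to_skip, which already counts the record's own four bytes.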
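
Last, NativeGeneralJump::insert_unconditional overwrites the first instruction at _pc_start with a jump into the stub, and the switch at source lines 354-359 picks the Runtime1 entry point and relocation type for the kind of patch. A compact sketch of that dispatch shape; apart from the case names and stub ids taken from the code above, everything here is simplified:

    // Dispatch shape only; the real code calls Runtime1::entry_for() and
    // stores a relocInfo::relocType, which this sketch reduces to strings.
    enum PatchKind { access_field_id, load_klass_id, load_mirror_id };

    struct PatchTarget { const char* stub; const char* reloc; };

    PatchTarget target_for(PatchKind id) {
      switch (id) {
        case access_field_id: return { "access_field_patching_id", "none" };
        case load_klass_id:   return { "load_klass_patching_id",   "metadata_type" };
        case load_mirror_id:  return { "load_mirror_patching_id",  "oop_type" };
      }
      return { "unreachable", "none" };  // ShouldNotReachHere()
    }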