src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp
7017732 Sdiff

Old:

 284       address ptr = (address)(_pc_start + i);
 285       int a_byte = (*ptr) & 0xFF;
 286       __ a_byte (a_byte);
 287     }
 288   }
 289 
 290   address end_of_patch = __ pc();
 291   int bytes_to_skip = 0;
 292   if (_id == load_klass_id) {
 293     int offset = __ offset();
 294     if (CommentedAssembly) {
 295       __ block_comment(" being_initialized check");
 296     }
 297 
 298     // static field accesses have special semantics while the class
 299     // initializer is being run so we emit a test which can be used to
 300     // check that this code is being executed by the initializing
 301     // thread.
 302     assert(_obj != noreg, "must be a valid register");
 303     assert(_oop_index >= 0, "must have oop index");
 304     __ ld_ptr(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc), G3);

 305     __ cmp(G2_thread, G3);
 306     __ br(Assembler::notEqual, false, Assembler::pn, call_patch);
 307     __ delayed()->nop();
 308 
 309     // load_klass patches may execute the patched code before it's
 310     // copied back into place so we need to jump back into the main
 311     // code of the nmethod to continue execution.
 312     __ br(Assembler::always, false, Assembler::pt, _patch_site_continuation);
 313     __ delayed()->nop();
 314 
 315     // make sure this extra code gets skipped
 316     bytes_to_skip += __ offset() - offset;
 317   }
 318 
 319   // Now emit the patch record telling the runtime how to find the
 320   // pieces of the patch.  We only need 3 bytes but it has to be
 321   // aligned as an instruction so emit 4 bytes.
 322   int sizeof_patch_record = 4;
 323   bytes_to_skip += sizeof_patch_record;
 324

New:

 284       address ptr = (address)(_pc_start + i);
 285       int a_byte = (*ptr) & 0xFF;
 286       __ a_byte (a_byte);
 287     }
 288   }
 289 
 290   address end_of_patch = __ pc();
 291   int bytes_to_skip = 0;
 292   if (_id == load_klass_id) {
 293     int offset = __ offset();
 294     if (CommentedAssembly) {
 295       __ block_comment(" being_initialized check");
 296     }
 297 
 298     // static field accesses have special semantics while the class
 299     // initializer is being run so we emit a test which can be used to
 300     // check that this code is being executed by the initializing
 301     // thread.
 302     assert(_obj != noreg, "must be a valid register");
 303     assert(_oop_index >= 0, "must have oop index");
 304     __ ld_ptr(_obj, java_lang_Class::klass_offset_in_bytes(), G3);
 305     __ ld_ptr(G3, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc), G3);
 306     __ cmp(G2_thread, G3);
 307     __ br(Assembler::notEqual, false, Assembler::pn, call_patch);
 308     __ delayed()->nop();
 309 
 310     // load_klass patches may execute the patched code before it's
 311     // copied back into place so we need to jump back into the main
 312     // code of the nmethod to continue execution.
 313     __ br(Assembler::always, false, Assembler::pt, _patch_site_continuation);
 314     __ delayed()->nop();
 315 
 316     // make sure this extra code gets skipped
 317     bytes_to_skip += __ offset() - offset;
 318   }
 319 
 320   // Now emit the patch record telling the runtime how to find the
 321   // pieces of the patch.  We only need 3 bytes but it has to be
 322   // aligned as an instruction so emit 4 bytes.
 323   int sizeof_patch_record = 4;
 324   bytes_to_skip += sizeof_patch_record;
 325 
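
The only functional change between the two versions is the extra ld_ptr at new line 304: in the old code _obj already held the klassOop, so init_thread could be read from it directly; in the new code _obj holds the java.lang.Class mirror, and the stub must first load the klassOop out of the mirror before reading init_thread. A minimal, self-contained C++ sketch of what the emitted SPARC sequence checks at runtime (the offsets and names below are placeholders, not HotSpot's real values or API):

    // Placeholder offsets; the real ones come from
    // java_lang_Class::klass_offset_in_bytes() and
    // instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc).
    static const int kKlassOffsetInMirror = 8;   // hypothetical value
    static const int kInitThreadOffset    = 64;  // hypothetical value

    // True when the current thread is the one running this class's
    // initializer, i.e. when the patched code may fall through instead of
    // branching back to call_patch.
    static bool is_initializing_thread(const char* mirror /* _obj, new code */,
                                       const void* current_thread /* G2_thread */) {
      // new line 304: ld_ptr(_obj, java_lang_Class::klass_offset_in_bytes(), G3)
      const char* klass =
          *reinterpret_cast<const char* const*>(mirror + kKlassOffsetInMirror);
      // line 305 (old 304): ld_ptr(G3, init_thread_offset + sizeof(klassOopDesc), G3)
      const void* init_thread =
          *reinterpret_cast<const void* const*>(klass + kInitThreadOffset);
      // lines 306-308: cmp G2_thread, G3; br notEqual -> call_patch; delayed nop
      return init_thread == current_thread;
    }

The not-equal branch to call_patch corresponds to this function returning false: any thread other than the initializing one is sent back to the patching path instead of executing the patched code.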
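
On the sizing comment at the end of both versions: SPARC instructions are a fixed four bytes, so the three bytes of patch-record data are padded out to one instruction. A sketch of the resulting bytes_to_skip accounting, with a placeholder for whatever __ offset() - offset measured:

    // Illustrative accounting only; the 4-byte patch record is the one size
    // the code above states explicitly.
    int being_initialized_check_bytes = 7 * 4;  // placeholder, e.g. seven 4-byte instructions
    int sizeof_patch_record = 4;                // 3 bytes of data, padded to instruction size
    int bytes_to_skip = being_initialized_check_bytes + sizeof_patch_record;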

