
src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp

rev 12906 : [mq]: gc_interface

*** 22,31 **** --- 22,32 ----
   *
   */
  
  #include "precompiled.hpp"
  #include "asm/macroAssembler.hpp"
+ #include "gc/shared/barrierSetCodeGen.hpp"
  #include "interpreter/bytecodeHistogram.hpp"
  #include "interpreter/interp_masm.hpp"
  #include "interpreter/interpreter.hpp"
  #include "interpreter/interpreterRuntime.hpp"
  #include "interpreter/templateInterpreterGenerator.hpp"
*** 690,700 ****
  // End of helpers
  
  // Method entry for java.lang.ref.Reference.get.
  address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
- #if INCLUDE_ALL_GCS
    // Code: _aload_0, _getfield, _areturn
    // parameter size = 1
    //
    // The code that gets generated by this routine is split into 2 parts:
    //    1. The "intrinsified" code for G1 (or any SATB based GC),
--- 691,700 ----
*** 723,733 ****
    address entry = __ pc();
  
    const int referent_offset = java_lang_ref_Reference::referent_offset;
    guarantee(referent_offset > 0, "referent offset not initialized");
  
-   if (UseG1GC) {
      Label slow_path;
      // rbx: method
  
      // Check if local 0 != NULL
      // If the receiver is null then it is OK to jump to the slow path.
--- 723,732 ----
*** 743,789 ****
      // Preserve the sender sp in case the pre-barrier
      // calls the runtime
      NOT_LP64(__ push(rsi));
  
-     // Generate the G1 pre-barrier code to log the value of
-     // the referent field in an SATB buffer.
- 
      // Load the value of the referent field.
      const Address field_address(rax, referent_offset);
!     __ load_heap_oop(rax, field_address);
! 
!     const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
!     const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
!     NOT_LP64(__ get_thread(thread));
! 
!     // Generate the G1 pre-barrier code to log the value of
!     // the referent field in an SATB buffer.
!     __ g1_write_barrier_pre(noreg /* obj */,
!                             rax /* pre_val */,
!                             thread /* thread */,
!                             rbx /* tmp */,
!                             true /* tosca_live */,
!                             true /* expand_call */);
  
      // _areturn
      NOT_LP64(__ pop(rsi));      // get sender sp
      __ pop(rdi);                // get return address
      __ mov(rsp, sender_sp);     // set sp to sender sp
      __ jmp(rdi);
      __ ret(0);
  
      // generate a vanilla interpreter entry as the slow path
      __ bind(slow_path);
      __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
      return entry;
-   }
- #endif // INCLUDE_ALL_GCS
- 
-   // If G1 is not enabled then attempt to go through the accessor entry point
-   // Reference.get is an accessor
-   return NULL;
  }
  
  void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
    // Quick & dirty stack overflow checking: bang the stack & handle trap.
    // Note that we do the banging after the frame is setup, since the exception
--- 742,768 ----
      // Preserve the sender sp in case the pre-barrier
      // calls the runtime
      NOT_LP64(__ push(rsi));
  
      // Load the value of the referent field.
      const Address field_address(rax, referent_offset);
!     BarrierSetCodeGen *code_gen = Universe::heap()->barrier_set()->code_gen();
!     code_gen->load_at(_masm, ACCESS_ON_HEAP | GC_ACCESS_ON_WEAK, T_OBJECT, rax, field_address, /*tmp1*/ rbx, /*tmp2*/ rdx);
  
      // _areturn
+     const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
      NOT_LP64(__ pop(rsi));      // get sender sp
      __ pop(rdi);                // get return address
      __ mov(rsp, sender_sp);     // set sp to sender sp
      __ jmp(rdi);
      __ ret(0);
  
      // generate a vanilla interpreter entry as the slow path
      __ bind(slow_path);
      __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
      return entry;
  }
  
  void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
    // Quick & dirty stack overflow checking: bang the stack & handle trap.
    // Note that we do the banging after the frame is setup, since the exception
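Note: the hunk above replaces the hand-written G1 SATB pre-barrier with a call into a per-barrier-set code generator. The sketch below is a self-contained, simplified model of that dispatch pattern, added for readers following the change; the class names and decorator values mirror those used by the patch, but the definitions here are illustrative stand-ins, not the actual HotSpot declarations or their real signatures.

// Simplified stand-ins for the real HotSpot types; illustrative only.
#include <cstdio>
#include <cstdint>

typedef uint32_t DecoratorSet;
const DecoratorSet ACCESS_ON_HEAP    = 1u << 0;  // access is on the Java heap
const DecoratorSet GC_ACCESS_ON_WEAK = 1u << 1;  // access reads a weak referent

struct MacroAssembler;  // opaque stand-in for the real assembler

// Default code generator: emit a plain oop load, no GC-specific barrier.
struct BarrierSetCodeGen {
  virtual ~BarrierSetCodeGen() {}
  virtual void load_at(MacroAssembler* masm, DecoratorSet decorators) {
    (void)masm;
    std::printf("emit plain oop load\n");
  }
};

// SATB collector (e.g. G1): a weak load must also log the loaded referent,
// which is what the removed g1_write_barrier_pre() call spelled out by hand.
struct G1BarrierSetCodeGen : public BarrierSetCodeGen {
  virtual void load_at(MacroAssembler* masm, DecoratorSet decorators) {
    BarrierSetCodeGen::load_at(masm, decorators);      // the load itself
    if (decorators & GC_ACCESS_ON_WEAK) {
      std::printf("emit SATB pre-barrier on loaded referent\n");
    }
  }
};

int main() {
  G1BarrierSetCodeGen g1;
  BarrierSetCodeGen* code_gen = &g1;  // chosen by the active barrier set at runtime
  code_gen->load_at(NULL, ACCESS_ON_HEAP | GC_ACCESS_ON_WEAK);
  return 0;
}

Because the barrier choice is made through this interface, the interpreter entry no longer needs the UseG1GC / INCLUDE_ALL_GCS guards removed in the earlier hunks: every collector supplies a generator, and non-SATB collectors simply emit the plain load.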