src/cpu/ppc/vm/compiledIC_ppc.cpp

Old version:

  77 //   r1 = toc
  78 //   ICreg = [r1 + IC_offset]         // Load IC from const section
  79 //   r1    = [r1 + offset]            // Load call target2 from const section
  80 //   mtctr r1
  81 //   bctr
  82 //
  83 // <<<< stubs
  84 //
  85 // The call instruction in the code either
  86 // - branches directly to a compiled method if the offset is encodable in the instruction
  87 // - branches to the trampoline stub if the offset to the compiled method is not encodable
  88 // - branches to the compiled_to_interp stub if the target is interpreted
  89 //
  90 // Further, there are three relocations from the loads to the constants in
  91 // the constant section.
  92 //
  93 // Using r1 and r2 in the stubs makes it possible to distinguish them.
  94 
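
The branch selection in the comment above comes down to whether the call offset fits the bl instruction's immediate. A minimal self-contained sketch of that check, assuming the Power ISA I-form branch's signed 24-bit word displacement (about +/-32 MB); the helper name is made up here, not taken from HotSpot:

  #include <cstdint>
  #include <cstdio>

  // Sketch: a PPC64 I-form branch (bl) encodes a signed 24-bit word
  // displacement, i.e. a byte offset in [-2^25, 2^25 - 4]. If the
  // distance from the call site to the target fits, the call can branch
  // directly; otherwise it must go through the trampoline stub (or the
  // compiled_to_interp stub if the target is interpreted).
  static bool fits_in_bl_displacement(int64_t distance) {
    if (distance & 0x3) return false;  // branch targets are word aligned
    return distance >= -(int64_t(1) << 25) &&
           distance <=  (int64_t(1) << 25) - 4;
  }

  int main() {
    printf("%d\n", fits_in_bl_displacement(0x100));             // 1: direct call
    printf("%d\n", fits_in_bl_displacement(int64_t(1) << 26));  // 0: needs a stub
    return 0;
  }
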
  95 const int IC_pos_in_java_to_interp_stub = 8;
  96 #define __ _masm.
  97 address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
  98 #ifdef COMPILER2
  99   // Get the mark within the main instrs section, which is set to the address of the call.
 100   address call_addr = cbuf.insts_mark();
 101
 102   // Note that the code buffer's insts_mark is always relative to insts.
 103   // That's why we must use the macroassembler to generate a stub.
 104   MacroAssembler _masm(&cbuf);
 105 
 106   // Start the stub.
 107   address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size());
 108   if (stub == NULL) {
 109     return NULL; // CodeCache is full
 110   }
 111 
 112   // For java_to_interp stubs we use R11_scratch1 as the scratch register,
 113   // and in call trampoline stubs we use R12_scratch2. This way we
 114   // can distinguish them (see is_NativeCallTrampolineStub_at()).
 115   Register reg_scratch = R11_scratch1;
 116 
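
The R11/R12 convention above is what lets is_NativeCallTrampolineStub_at() tell the two stub kinds apart: both start by materializing the TOC into their scratch register, so the register field of the stub's first instruction identifies it. A hedged sketch of the idea, assuming the standard Power ISA field layout (6-bit primary opcode, then the 5-bit RT field); the helpers are illustrative, not HotSpot code:

  #include <cstdint>
  #include <cstdio>

  // Sketch: extract the RT (target register) field from a 32-bit PPC
  // instruction word; the primary opcode is the top 6 bits, RT the next 5.
  static unsigned rt_field(uint32_t insn) {
    return (insn >> 21) & 0x1f;
  }

  // A java_to_interp stub computes the TOC into R11 (R11_scratch1), a call
  // trampoline stub into R12 (R12_scratch2), so checking which register the
  // first instruction targets separates the two stub kinds.
  static bool targets_trampoline_scratch(uint32_t first_insn) {
    return rt_field(first_insn) == 12;  // R12 => trampoline stub
  }

  int main() {
    // addis r12, r29, 0 (primary opcode 15): RT field is 12.
    uint32_t addis_r12 = (15u << 26) | (12u << 21) | (29u << 16);
    printf("%d\n", targets_trampoline_scratch(addis_r12));  // 1
    return 0;
  }
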
 117   // Create a static stub relocation which relates this stub
 118   // to the call instruction at insts_call_instruction_offset in the
 119   // instructions code section.
 120   __ relocate(static_stub_Relocation::spec(call_addr));
 121   const int stub_start_offset = __ offset();
 122 
 123   // Now, create the stub's code:
 124   // - load the TOC
 125   // - load the inline cache oop from the constant pool
 126   // - load the call target from the constant pool
 127   // - call
 128   __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
 129   AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);
 130   __ load_const_from_method_toc(as_Register(Matcher::inline_cache_reg_encode()), ic, reg_scratch);
 131 
 132   if (ReoptimizeCallSequences) {
 133     __ b64_patchable((address)-1, relocInfo::none);
 134   } else {
 135     AddressLiteral a((address)-1);
 136     __ load_const_from_method_toc(reg_scratch, a, reg_scratch);
 137     __ mtctr(reg_scratch);
 138     __ bctr();
 139   }
 140

New version:

  77 //   r1 = toc
  78 //   ICreg = [r1 + IC_offset]         // Load IC from const section
  79 //   r1    = [r1 + offset]            // Load call target2 from const section
  80 //   mtctr r1
  81 //   bctr
  82 //
  83 // <<<< stubs
  84 //
  85 // The call instruction in the code either
  86 // - branches directly to a compiled method if the offset is encodable in the instruction
  87 // - branches to the trampoline stub if the offset to the compiled method is not encodable
  88 // - branches to the compiled_to_interp stub if the target is interpreted
  89 //
  90 // Further, there are three relocations from the loads to the constants in
  91 // the constant section.
  92 //
  93 // Using r1 and r2 in the stubs makes it possible to distinguish them.
  94 
  95 const int IC_pos_in_java_to_interp_stub = 8;
  96 #define __ _masm.
  97 address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = NULL*/) {
  98 #ifdef COMPILER2
  99   if (mark == NULL) {
 100     // Get the mark within the main instrs section, which is set to the address of the call.
 101     mark = cbuf.insts_mark();
 102   }
 103 
 104   // Note that the code buffer's insts_mark is always relative to insts.
 105   // That's why we must use the macroassembler to generate a stub.
 106   MacroAssembler _masm(&cbuf);
 107 
 108   // Start the stub.
 109   address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size());
 110   if (stub == NULL) {
 111     return NULL; // CodeCache is full
 112   }
 113 
 114   // For java_to_interp stubs we use R11_scratch1 as the scratch register,
 115   // and in call trampoline stubs we use R12_scratch2. This way we
 116   // can distinguish them (see is_NativeCallTrampolineStub_at()).
 117   Register reg_scratch = R11_scratch1;
 118 
 119   // Create a static stub relocation which relates this stub
 120   // to the call instruction at insts_call_instruction_offset in the
 121   // instructions code section.
 122   __ relocate(static_stub_Relocation::spec(mark));
 123   const int stub_start_offset = __ offset();
 124 
 125   // Now, create the stub's code:
 126   // - load the TOC
 127   // - load the inline cache oop from the constant pool
 128   // - load the call target from the constant pool
 129   // - call
 130   __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
 131   AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);
 132   __ load_const_from_method_toc(as_Register(Matcher::inline_cache_reg_encode()), ic, reg_scratch);
 133 
 134   if (ReoptimizeCallSequences) {
 135     __ b64_patchable((address)-1, relocInfo::none);
 136   } else {
 137     AddressLiteral a((address)-1);
 138     __ load_const_from_method_toc(reg_scratch, a, reg_scratch);
 139     __ mtctr(reg_scratch);
 140     __ bctr();
 141   }
 142 
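
The new mark parameter is the substance of this change: a caller that recorded the call site's address earlier can pass it in, and only when mark is NULL does the function fall back to cbuf.insts_mark(). A sketch of the intended call pattern; the surrounding caller code is hypothetical, only emit_to_interp_stub() and insts_mark() come from the listing above:

  // Hypothetical caller, for illustration only:
  address the_call = cbuf.insts_mark();   // remember the call site now
  // ... emit further instructions; insts_mark() moves past the call ...
  address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, the_call);
  if (stub == NULL) {
    // CodeCache is full; propagate the failure just as the emitter does.
  }
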

