src/cpu/ppc/vm/compiledIC_ppc.cpp

  77 //   r1 = toc
  78 //   ICreg = [r1 + IC_offset]         // Load IC from const section
  79 //   r1    = [r1 + offset]            // Load call target2 from const section
  80 //   mtctr r1
  81 //   bctr
  82 //
  83 // <<<< stubs
  84 //
  85 // The call instruction in the code either
  86 // - branches directly to a compiled method if offset encodable in instruction
  87 // - branches to the trampoline stub if offset to compiled method not encodable
  88 // - branches to the compiled_to_interp stub if target interpreted
  89 //
  90 // Further, there are three relocations from the loads to the constants in
  91 // the constant section.
  92 //
  93 // The use of r1 and r2 in the stubs makes it possible to distinguish them.
  94 
  95 const int IC_pos_in_java_to_interp_stub = 8;
  96 #define __ _masm.
  97 void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
  98 #ifdef COMPILER2
  99   // Get the mark within the main instrs section, which is set to the address of the call.
 100   address call_addr = cbuf.insts_mark();
 101 
 102   // Note that the code buffer's insts_mark is always relative to insts.
 103   // That's why we must use the macroassembler to generate a stub.
 104   MacroAssembler _masm(&cbuf);
 105 
 106   // Start the stub.
 107   address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size());
 108   if (stub == NULL) {
 109     Compile::current()->env()->record_out_of_memory_failure();
 110     return;
 111   }
 112 
  113   // For java_to_interp stubs we use R11_scratch1 as the scratch register,
  114   // while call trampoline stubs use R12_scratch2. This way the two stub
  115   // kinds can be distinguished (see is_NativeCallTrampolineStub_at()).
 116   Register reg_scratch = R11_scratch1;
 117 
 118   // Create a static stub relocation which relates this stub
 119   // with the call instruction at insts_call_instruction_offset in the
 120   // instructions code-section.
 121   __ relocate(static_stub_Relocation::spec(call_addr));
 122   const int stub_start_offset = __ offset();
 123 
 124   // Now, create the stub's code:
 125   // - load the TOC
 126   // - load the inline cache oop from the constant pool
 127   // - load the call target from the constant pool
 128   // - call
 129   __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
 130   AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);


 132 
 133   if (ReoptimizeCallSequences) {
 134     __ b64_patchable((address)-1, relocInfo::none);
 135   } else {
 136     AddressLiteral a((address)-1);
 137     __ load_const_from_method_toc(reg_scratch, a, reg_scratch);
 138     __ mtctr(reg_scratch);
 139     __ bctr();
 140   }
 141 
 142   // FIXME: Assert that the stub can be identified and patched.
 143 
 144   // Java_to_interp_stub_size should be good.
 145   assert((__ offset() - stub_start_offset) <= CompiledStaticCall::to_interp_stub_size(),
 146          "should be good size");
 147   assert(!is_NativeCallTrampolineStub_at(__ addr_at(stub_start_offset)),
 148          "must not confuse java_to_interp with trampoline stubs");
 149 
  150   // End the stub.
 151   __ end_a_stub();

 152 #else
 153   ShouldNotReachHere();
 154 #endif
 155 }
 156 #undef __
 157 
  158 // Size of the java_to_interp stub. This does not need to be accurate, but it
  159 // must be larger than or equal to the real size of the stub.
  160 // Used for optimization in Compile::shorten_branches.
 161 int CompiledStaticCall::to_interp_stub_size() {
 162   return 12 * BytesPerInstWord;
 163 }
 164 
 165 // Relocation entries for call stub, compiled java to interpreter.
  166 // Used for optimization in Compile::shorten_branches.
 167 int CompiledStaticCall::reloc_to_interp_stub() {
 168   return 5;
 169 }
 170 
 171 void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {




  77 //   r1 = toc
  78 //   ICreg = [r1 + IC_offset]         // Load IC from const section
  79 //   r1    = [r1 + offset]            // Load call target2 from const section
  80 //   mtctr r1
  81 //   bctr
  82 //
  83 // <<<< stubs
  84 //
  85 // The call instruction in the code either
  86 // - branches directly to a compiled method if offset encodable in instruction
  87 // - branches to the trampoline stub if offset to compiled method not encodable
  88 // - branches to the compiled_to_interp stub if target interpreted
  89 //
  90 // Further, there are three relocations from the loads to the constants in
  91 // the constant section.
  92 //
  93 // The use of r1 and r2 in the stubs makes it possible to distinguish them.
  94 
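The register convention above is what the runtime relies on when it only has a raw stub address: a java_to_interp stub starts by loading the TOC into R11_scratch1, while a call trampoline stub uses R12_scratch2. As a rough sketch of that idea (not the actual is_NativeCallTrampolineStub_at() implementation), the destination register of the stub's first instruction can be read from the fixed RT field of the 4-byte PPC instruction:

    #include <stdint.h>

    // Sketch only, assuming the stub's first instruction is the TOC load
    // emitted with the scratch register as its destination (RT field,
    // bits 6..10 of the instruction in Power ISA bit numbering).
    static int dst_reg_of(uint32_t insn) {
      return (insn >> 21) & 0x1f;              // RT/RD field of addis/addi/ld
    }

    static bool looks_like_trampoline_stub(const uint32_t* stub_begin) {
      return dst_reg_of(stub_begin[0]) == 12;  // R12_scratch2 => trampoline stub
    }

    static bool looks_like_java_to_interp_stub(const uint32_t* stub_begin) {
      return dst_reg_of(stub_begin[0]) == 11;  // R11_scratch1 => java_to_interp stub
    }
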
  95 const int IC_pos_in_java_to_interp_stub = 8;
  96 #define __ _masm.
  97 address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
  98 #ifdef COMPILER2
  99   // Get the mark within the main instrs section, which is set to the address of the call.
 100   address call_addr = cbuf.insts_mark();
 101 
 102   // Note that the code buffer's insts_mark is always relative to insts.
 103   // That's why we must use the macroassembler to generate a stub.
 104   MacroAssembler _masm(&cbuf);
 105 
 106   // Start the stub.
 107   address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size());
 108   if (stub == NULL) {
 109     return NULL; // CodeCache is full

 110   }
 111 
  112   // For java_to_interp stubs we use R11_scratch1 as the scratch register,
  113   // while call trampoline stubs use R12_scratch2. This way the two stub
  114   // kinds can be distinguished (see is_NativeCallTrampolineStub_at()).
 115   Register reg_scratch = R11_scratch1;
 116 
 117   // Create a static stub relocation which relates this stub
 118   // with the call instruction at insts_call_instruction_offset in the
 119   // instructions code-section.
 120   __ relocate(static_stub_Relocation::spec(call_addr));
 121   const int stub_start_offset = __ offset();
 122 
 123   // Now, create the stub's code:
 124   // - load the TOC
 125   // - load the inline cache oop from the constant pool
 126   // - load the call target from the constant pool
 127   // - call
 128   __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
 129   AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);


 131 
 132   if (ReoptimizeCallSequences) {
 133     __ b64_patchable((address)-1, relocInfo::none);
 134   } else {
 135     AddressLiteral a((address)-1);
 136     __ load_const_from_method_toc(reg_scratch, a, reg_scratch);
 137     __ mtctr(reg_scratch);
 138     __ bctr();
 139   }
 140 
 141   // FIXME: Assert that the stub can be identified and patched.
 142 
 143   // Java_to_interp_stub_size should be good.
 144   assert((__ offset() - stub_start_offset) <= CompiledStaticCall::to_interp_stub_size(),
 145          "should be good size");
 146   assert(!is_NativeCallTrampolineStub_at(__ addr_at(stub_start_offset)),
 147          "must not confuse java_to_interp with trampoline stubs");
 148 
  149   // End the stub.
 150   __ end_a_stub();
 151   return stub;
 152 #else
 153   ShouldNotReachHere();
 154 #endif
 155 }
 156 #undef __
 157 
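With the changed signature, a failed stub emission is no longer recorded here; the stub's start address is returned instead, and NULL signals that the code cache is full. A minimal sketch of the expected caller-side handling, with a hypothetical call site shown only for illustration (the real callers live in the shared C2 output code):

    // Hypothetical call site, for illustration only.
    address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
    if (stub == NULL) {
      // Bail out of this compilation instead of continuing without the stub;
      // the compile can be retried once the code cache has room again.
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
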
  158 // Size of the java_to_interp stub. This does not need to be accurate, but it
  159 // must be larger than or equal to the real size of the stub.
  160 // Used for optimization in Compile::shorten_branches.
 161 int CompiledStaticCall::to_interp_stub_size() {
 162   return 12 * BytesPerInstWord;
 163 }
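For reference, PPC instructions are fixed-width 4-byte words (BytesPerInstWord == 4), so this reserves 12 * 4 = 48 bytes per stub; the assert in emit_to_interp_stub() compares the actually emitted size against this bound. A small illustrative helper restating that check (a sketch, not part of the file):

    // Sketch only: mirrors the size assert in emit_to_interp_stub().
    // 'emitted_size' stands for __ offset() - stub_start_offset after emission.
    static bool stub_fits_reservation(int emitted_size) {
      return emitted_size <= CompiledStaticCall::to_interp_stub_size();  // 12 * 4 = 48 bytes on PPC
    }
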
 164 
 165 // Relocation entries for call stub, compiled java to interpreter.
  166 // Used for optimization in Compile::shorten_branches.
 167 int CompiledStaticCall::reloc_to_interp_stub() {
 168   return 5;
 169 }
 170 
 171 void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {

