/*
 * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_x86_64.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// machine-dependent part of VtableStubs: create VtableStub of correct size and
// initialize its code

#define __ masm->

#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread,
                                          oop receiver,
                                          int index);
#endif

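// A vtable stub is the code a megamorphic virtual call site dispatches
// through: load the receiver's klass, fetch the Method* from the klass's
// embedded vtable at the fixed vtable_index this stub was created for, and
// tail-jump to that method's from-compiled entry point.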
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  const int amd64_code_length = VtableStub::pd_code_size_limit(true);
  VtableStub* s = new(amd64_code_length) VtableStub(true, vtable_index);
  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), amd64_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#ifndef PRODUCT
  if (CountCompiledCalls) {
    __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
  }
#endif

  // get receiver (need to skip return address on top of stack)
  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");

  // Free registers (non-args) are rax, rbx

  // get receiver klass
  address npe_addr = __ pc();
  __ load_klass(rax, j_rarg0);
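  // This klass load doubles as the implicit null check on the receiver; a
  // fault here is reported at npe_addr, captured just above. With compressed
  // klass pointers the macro assembler also emits the decode sequence, which
  // is why pd_code_size_limit() adds instr_size_for_decode_klass_not_null().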

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // check offset vs vtable length
    __ cmpl(Address(rax, InstanceKlass::vtable_length_offset() * wordSize),
            vtable_index * vtableEntry::size());
    __ jcc(Assembler::greater, L);
    __ movl(rbx, vtable_index);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), j_rarg0, rbx);
    __ bind(L);
  }
#endif // PRODUCT

  // load Method* and target address
  const Register method = rbx;

  __ lookup_virtual_method(rax, vtable_index, method);
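  // lookup_virtual_method loads 'method' with the Method* stored in the
  // receiver klass's vtable at slot vtable_index; the index was resolved at
  // compile time, so no search is required here.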

  if (DebugVtables) {
    Label L;
    __ cmpptr(method, (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);
    __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::notZero, L);
    __ stop("Vtable entry is NULL");
    __ bind(L);
  }
  // rax: receiver klass
  // rbx: Method*
  // j_rarg0: receiver
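  // ame_addr: if the vtable slot holds NULL, the memory load in the jump
  // below faults, and a fault at this pc is converted into an
  // AbstractMethodError rather than a crash.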
  address ame_addr = __ pc();
  __ jmp( Address(rbx, Method::from_compiled_offset()));

  __ flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("vtable #%d at " PTR_FORMAT "[%d] left over: %d",
                  vtable_index, s->entry_point(),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // shut the door on sizing bugs
  int slop = 3;  // 32-bit offset is this much larger than an 8-bit one
  assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}


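// An itable stub performs the dispatch for a megamorphic interface call
// site. The interface's position in the receiver klass is not a compile-time
// constant, so the stub scans the receiver klass's itable for the interface
// klass passed in rax, loads the Method* at itable_index within that
// interface's method block, and tail-jumps to it. A receiver that does not
// implement the interface branches to throw_icce instead.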
VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  // Note well: pd_code_size_limit is the absolute minimum we can get
  // away with.  If you add code here, bump the code stub size
  // returned by pd_code_size_limit!
  const int amd64_code_length = VtableStub::pd_code_size_limit(false);
  VtableStub* s = new(amd64_code_length) VtableStub(false, itable_index);
  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), amd64_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#ifndef PRODUCT
  if (CountCompiledCalls) {
    __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
  }
#endif

  // Entry arguments:
  //  rax: Interface
  //  j_rarg0: Receiver

  // Free registers (non-args) are rax (interface), rbx

  // get receiver (need to skip return address on top of stack)

  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();

  // Most registers are in use; we'll use rax, rbx, r10, r11
  // (various calling sequences use r[cd]x, r[sd]i, r[89]; stay away from them)
  __ load_klass(r10, j_rarg0);

  // If we take a trap while this arg is on the stack we will not
  // be able to walk the stack properly. This is not an issue except
  // when there are mistakes in this assembly code that could generate
  // a spurious fault. Ask me how I know...

  const Register method = rbx;
  Label throw_icce;

  // Get Method* and entrypoint for compiler
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             r10, rax, itable_index,
                             // outputs: method, scan temp. reg
                             method, r11,
                             throw_icce);
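  // lookup_interface_method emits the scan loop shown in the disassembly at
  // the bottom of this file: walk the receiver klass's itable until the
  // interface klass in rax is found (a NULL entry means the receiver does not
  // implement the interface and we branch to throw_icce), then load the
  // Method* for itable_index within that interface's method block.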

  // method (rbx): Method*
  // j_rarg0: receiver

#ifdef ASSERT
  if (DebugVtables) {
    Label L2;
    __ cmpptr(method, (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L2);
    __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::notZero, L2);
    __ stop("compiler entrypoint is null");
    __ bind(L2);
  }
#endif // ASSERT

  // rbx: Method*
  // j_rarg0: receiver
  address ame_addr = __ pc();
  __ jmp(Address(method, Method::from_compiled_offset()));

  __ bind(throw_icce);
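  // Receiver does not implement the interface: tail-jump to the shared stub
  // that throws IncompatibleClassChangeError; no frame is built here.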
  __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));

  __ flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("itable #%d at " PTR_FORMAT "[%d] left over: %d",
                  itable_index, s->entry_point(),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // shut the door on sizing bugs
  int slop = 3;  // 32-bit offset is this much larger than an 8-bit one
  assert(itable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}

int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
  if (is_vtable_stub) {
    // Vtable stub size
    return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0) +
           (UseCompressedKlassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
  } else {
    // Itable stub size
    return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) +
           (UseCompressedKlassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
  }
  // In order to tune these parameters, run the JVM with VM options
  // +PrintMiscellaneous and +WizardMode to see information about
  // actual itable stubs.  Look for lines like this:
  //   itable #1 at 0x5551212[71] left over: 3
  // Reduce the constants so that the "left over" number is >=3
  // for the common cases.
  // Do not aim at a left-over number of zero, because a
  // large vtable or itable index (>= 32) will require a 32-bit
  // immediate displacement instead of an 8-bit one.
  //
  // The JVM98 app. _202_jess has a megamorphic interface call.
  // The itable code looks like this:
  // Decoding VtableStub itbl[1]@12
  //   mov    0x8(%rsi),%r10
  //   mov    0x198(%r10),%r11d
  //   lea    0x218(%r10,%r11,8),%r11
  //   lea    0x8(%r10),%r10
  //   mov    (%r11),%rbx
  //   cmp    %rbx,%rax
  //   je     success
  // loop:
  //   test   %rbx,%rbx
  //   je     throw_icce
  //   add    $0x10,%r11
  //   mov    (%r11),%rbx
  //   cmp    %rbx,%rax
  //   jne    loop
  // success:
  //   mov    0x8(%r11),%r11d
  //   mov    (%r10,%r11,1),%rbx
  //   jmpq   *0x60(%rbx)
  // throw_icce:
  //   jmpq   throw_ICCE_entry
}

int VtableStub::pd_code_alignment() {
  return wordSize;
}