/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_vtableStubs_x86_64.cpp.incl"

// machine-dependent part of VtableStubs: create VtableStub of correct size and
// initialize its code
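//
// In outline, each stub loads the receiver's klass, indexes into its vtable
// (or scans its itable), and tail-jumps to the selected method's compiled
// entry point.  The addresses recorded via set_exception_points() mark the
// two instructions whose hardware faults the runtime maps to implicit
// exceptions: npe_addr (null receiver => NullPointerException) and ame_addr
// (missing implementation => AbstractMethodError).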

#define __ masm->

#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread,
                                          oop receiver,
                                          int index);
#endif

VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  const int amd64_code_length = VtableStub::pd_code_size_limit(true);
  VtableStub* s = new(amd64_code_length) VtableStub(true, vtable_index);
  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), amd64_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#ifndef PRODUCT
  if (CountCompiledCalls) {
    __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
  }
#endif

  // get receiver (need to skip return address on top of stack)
  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");

  // Free registers (non-args) are rax, rbx

  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();
  __ load_klass(rax, j_rarg0);

  // compute entry offset (in words)
  int entry_offset =
    instanceKlass::vtable_start_offset() + vtable_index * vtableEntry::size();
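  // The methodOop for this slot is loaded below from
  //   receiver_klass + entry_offset * wordSize + vtableEntry::method_offset_in_bytes()
  // (entry_offset is in words; the Address expression converts it to bytes).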

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // check offset vs vtable length
    __ cmpl(Address(rax, instanceKlass::vtable_length_offset() * wordSize),
            vtable_index * vtableEntry::size());
    __ jcc(Assembler::greater, L);
    __ movl(rbx, vtable_index);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), j_rarg0, rbx);
    __ bind(L);
  }
#endif // PRODUCT

  // load methodOop and target address
  const Register method = rbx;

  __ movptr(method, Address(rax,
                            entry_offset * wordSize +
                            vtableEntry::method_offset_in_bytes()));
  if (DebugVtables) {
    Label L;
    __ cmpptr(method, (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);
    __ cmpptr(Address(method, methodOopDesc::from_compiled_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::notZero, L);
    __ stop("Vtable entry is NULL");
    __ bind(L);
  }
  // rax: receiver klass
  // rbx: methodOop
  // j_rarg0: receiver
  address ame_addr = __ pc();
  __ jmp(Address(rbx, methodOopDesc::from_compiled_offset()));
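  // The jump above goes through the methodOop's from_compiled entry, i.e. either
  // compiled code or the c2i adapter.  If the vtable slot held a null methodOop,
  // the indirect jump faults on a near-null address and the fault at ame_addr is
  // reported by the runtime as an AbstractMethodError.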

  __ flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("vtable #%d at "PTR_FORMAT"[%d] left over: %d",
                  vtable_index, s->entry_point(),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // shut the door on sizing bugs
  int slop = 3;  // 32-bit offset is this much larger than an 8-bit one
  assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}


VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  // Note well: pd_code_size_limit is the absolute minimum we can get
  // away with.  If you add code here, bump the code stub size
  // returned by pd_code_size_limit!
  const int amd64_code_length = VtableStub::pd_code_size_limit(false);
  VtableStub* s = new(amd64_code_length) VtableStub(false, itable_index);
  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), amd64_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#ifndef PRODUCT
  if (CountCompiledCalls) {
    __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
  }
#endif

  // Entry arguments:
  //  rax: Interface
  //  j_rarg0: Receiver

  // Free registers (non-args) are rax (interface), rbx

  // get receiver (need to skip return address on top of stack)

  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();

  // Most registers are in use; we'll use rax, rbx, r10, r11
  // (various calling sequences use r[cd]x, r[sd]i, r[89]; stay away from them)
  __ load_klass(r10, j_rarg0);
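  // Note: load_klass also emits the decode of the compressed klass word when
  // UseCompressedOops is on (roughly a movl plus a leaq instead of a plain movq);
  // the UseCompressedOops padding in pd_code_size_limit below is meant to cover
  // those extra bytes.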

  // If we take a trap while this arg is on the stack we will not
  // be able to walk the stack properly. This is not an issue except
  // when there are mistakes in this assembly code that could generate
  // a spurious fault. Ask me how I know...

  const Register method = rbx;
  Label throw_icce;

  // Get methodOop and entrypoint for compiler
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             r10, rax, itable_index,
                             // outputs: method, scan temp. reg
                             method, r11,
                             throw_icce);
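  // lookup_interface_method scans the receiver's itable (using r11 as the scan
  // temp) for the interface klass held in rax; if the scan runs off the end it
  // branches to throw_icce, otherwise it loads the methodOop for itable_index
  // into rbx.  The decoded listing in pd_code_size_limit below shows the
  // resulting scan loop.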

  // method (rbx): methodOop
  // j_rarg0: receiver

#ifdef ASSERT
  if (DebugVtables) {
    Label L2;
    __ cmpptr(method, (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L2);
    __ cmpptr(Address(method, methodOopDesc::from_compiled_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::notZero, L2);
    __ stop("compiler entrypoint is null");
    __ bind(L2);
  }
#endif // ASSERT

  // rbx: methodOop
  // j_rarg0: receiver
  address ame_addr = __ pc();
  __ jmp(Address(method, methodOopDesc::from_compiled_offset()));

  __ bind(throw_icce);
  __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
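  // A receiver whose class does not implement the interface ends up here; the
  // shared runtime stub raises IncompatibleClassChangeError.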

  __ flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("itable #%d at "PTR_FORMAT"[%d] left over: %d",
                  itable_index, s->entry_point(),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // shut the door on sizing bugs
  int slop = 3;  // 32-bit offset is this much larger than an 8-bit one
  assert(itable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}

int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
  if (is_vtable_stub) {
    // Vtable stub size
    return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0) +
           (UseCompressedOops ? 16 : 0);  // 1 leaq can be 3 bytes + 1 long
  } else {
    // Itable stub size
    return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) +
           (UseCompressedOops ? 32 : 0);  // 2 leaqs
  }
  // In order to tune these parameters, run the JVM with VM options
  // +PrintMiscellaneous and +WizardMode to see information about
  // actual itable stubs.  Look for lines like this:
  //   itable #1 at 0x5551212[71] left over: 3
  // Reduce the constants so that the "left over" number is >=3
  // for the common cases.
  // Do not aim at a left-over number of zero, because a
  // large vtable or itable index (>= 32) will require a 32-bit
  // immediate displacement instead of an 8-bit one.
  //
  // The JVM98 app. _202_jess has a megamorphic interface call.
  // The itable code looks like this:
  // Decoding VtableStub itbl[1]@12
  //   mov    0x8(%rsi),%r10
  //   mov    0x198(%r10),%r11d
  //   lea    0x218(%r10,%r11,8),%r11
  //   lea    0x8(%r10),%r10
  //   mov    (%r11),%rbx
  //   cmp    %rbx,%rax
  //   je     success
  // loop:
  //   test   %rbx,%rbx
  //   je     throw_icce
  //   add    $0x10,%r11
  //   mov    (%r11),%rbx
  //   cmp    %rbx,%rax
  //   jne    loop
  // success:
  //   mov    0x8(%r11),%r11d
  //   mov    (%r10,%r11,1),%rbx
  //   jmpq   *0x60(%rbx)
  // throw_icce:
  //   jmpq   throw_ICCE_entry
}

int VtableStub::pd_code_alignment() {
  return wordSize;
}