/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_sparc.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_sparc.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// machine-dependent part of VtableStubs: create vtableStub of correct size and
// initialize its code

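// Convenience shorthand: '__ insn(...)' below expands to 'masm->insn(...)'.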
#define __ masm->

#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oopDesc* receiver, int index);
#endif


// Used by compiler only; may use only caller-saved, non-argument registers
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(true);
  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address   start_pc;
  int       slop_bytes = 0;
  int       slop_delta = 0;
  const int index_dependent_slop     = ((vtable_index < 512) ? 2 : 0)*BytesPerInstWord; // code size change with transition from 13-bit to 32-bit constant (@index == 512?).

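  // The stub code is assembled directly into the VtableStub's code area (s->entry_point()).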
  ResourceMark    rm;
  CodeBuffer      cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    __ inc_counter(SharedRuntime::nof_megamorphic_calls_addr(), G5, G3_scratch);
  }
#endif // !PRODUCT && COMPILER2

  assert(VtableStub::receiver_location() == O0->as_VMReg(), "receiver expected in O0");

  // get receiver klass
  address npe_addr = __ pc();
  __ load_klass(O0, G3_scratch);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // check offset vs vtable length
    __ ld(G3_scratch, in_bytes(Klass::vtable_length_offset()), G5);
    __ cmp_and_br_short(G5, vtable_index*vtableEntry::size(), Assembler::greaterUnsigned, Assembler::pt, L);

    // set generates 8 instructions (worst case), 1 instruction (best case)
    start_pc = __ pc();
    __ set(vtable_index, O2);
    slop_delta  = __ worst_case_insts_for_set()*BytesPerInstWord - (__ pc() - start_pc);
    slop_bytes += slop_delta;
    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);

    // there is no variance in call_VM() emitted code.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), O0, O2);
    __ bind(L);
  }
#endif

  // set Method* (in case of interpreted method), and destination address
  start_pc = __ pc();
  __ lookup_virtual_method(G3_scratch, vtable_index, G5_method);
  // lookup_virtual_method generates 3 instructions (worst case), 1 instruction (best case)
  slop_delta  = 3*BytesPerInstWord - (int)(__ pc() - start_pc);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    __ br_notnull_short(G5_method, Assembler::pt, L);
    __ stop("Vtable entry is ZERO");
    __ bind(L);
  }
#endif

  address ame_addr = __ pc();  // if the vtable entry is null, the method is abstract
                               // NOTE: for vtable dispatches, the vtable entry will never be null.

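  // Load the target entry point from the Method* (its from-compiled entry).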
  __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3_scratch);

  // jump to target (either compiled code or c2iadapter)
  __ JMP(G3_scratch, 0);
  // load Method* (in case we call c2iadapter)
  __ delayed()->nop();

  masm->flush();
  slop_bytes += index_dependent_slop; // add'l slop for size variance due to large vtable offsets
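  // bookkeeping() (shared code) checks that the emitted code fits the stub, updates the
  // size estimate used for subsequent stubs, and records the NPE/AME addresses in the stub.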
  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, index_dependent_slop);

  return s;
}


VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(false);
  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }
  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address   start_pc;
  int       slop_bytes = 0;
  int       slop_delta = 0;
  const int index_dependent_slop     = ((itable_index < 512) ? 2 : 0)*BytesPerInstWord; // code size change with transition from 13-bit to 32-bit constant (@index == 512?).

  ResourceMark    rm;
  CodeBuffer      cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    // Use G3_scratch, G4_scratch as work regs for inc_counter.
    // Both are written before they are read further down, so clobbering them here is safe.
    __ inc_counter(SharedRuntime::nof_megamorphic_calls_addr(), G3_scratch, G4_scratch);
  }
#endif // !PRODUCT && COMPILER2

  Register G3_Klass = G3_scratch;
  Register G5_icholder = G5;  // Passed in as an argument
  Register G4_interface = G4_scratch;
  Label search;

  // Entry arguments:
  //  G5_icholder: CompiledICHolder for this call site
  //  O0:          Receiver
  assert(VtableStub::receiver_location() == O0->as_VMReg(), "receiver expected in O0");

  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();
  __ load_klass(O0, G3_Klass);

  // Push a new window to get some temp registers.  This chops the head of all
  // my 64-bit %o registers in the LION build, but this is OK because no longs
  // are passed in the %o registers.  Instead, longs are passed in G1 and G4
  // and so those registers are not available here.
  __ save(SP,-frame::register_save_words*wordSize,SP);

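  // Keep the Method* in a local (window) register; the restore below copies it back to G5_method.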
  Label    L_no_such_interface;
  Register L5_method = L5;

  start_pc = __ pc();

  // Receiver subtype check against REFC.
  __ ld_ptr(G5_icholder, CompiledICHolder::holder_klass_offset(), G4_interface);
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             G3_Klass, G4_interface, itable_index,
                             // outputs: scan temp. reg1, scan temp. reg2
                             L5_method, L2, L3,
                             L_no_such_interface,
                             /*return_method=*/ false);

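  // Record the size of the type check sequence; together with lookupSize below it is
  // checked against the code size estimate further down.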
  const ptrdiff_t typecheckSize = __ pc() - start_pc;
  start_pc = __ pc();

  // Get Method* and entrypoint for compiler
  __ ld_ptr(G5_icholder, CompiledICHolder::holder_metadata_offset(), G4_interface);
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             G3_Klass, G4_interface, itable_index,
                             // outputs: method, scan temp. reg
                             L5_method, L2, L3,
                             L_no_such_interface);

  const ptrdiff_t lookupSize = __ pc() - start_pc;

  // Reduce "estimate" such that "padding" does not drop below 8.
  // Do not target a left-over number of zero, because a very
  // large vtable or itable offset (> 4K) will require an extra
  // sethi/or pair of instructions.
  // Found typecheck(60) + lookup(72) to exceed previous estimate (32*4).
  const ptrdiff_t estimate = 36*BytesPerInstWord;
  const ptrdiff_t codesize = typecheckSize + lookupSize + index_dependent_slop;
  slop_delta  = (int)(estimate - codesize);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L01;
    __ br_notnull_short(L5_method, Assembler::pt, L01);
    __ stop("Method* is null");
    __ bind(L01);
  }
#endif

  // If the following load is through a NULL pointer, we'll take an OS
  // exception that should translate into an AbstractMethodError.  We need the
  // window count to be correct at that time.
  __ restore(L5_method, 0, G5_method);
  // Restore registers *before* the AME point.

  address ame_addr = __ pc();   // if the vtable entry is null, the method is abstract
  __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3_scratch);

  // G5_method:  Method*
  // O0:         Receiver
  // G3_scratch: entry point
  __ JMP(G3_scratch, 0);
  __ delayed()->nop();

  __ bind(L_no_such_interface);
  // Handle IncompatibleClassChangeError in itable stubs.
  // More detailed error message.
  // We force resolving of the call site by jumping to the "handle
  // wrong method" stub, and so let the interpreter runtime do all the
  // dirty work.
  AddressLiteral icce(SharedRuntime::get_handle_wrong_method_stub());
  __ jump_to(icce, G3_scratch);
  __ delayed()->restore();

  masm->flush();
  slop_bytes += index_dependent_slop; // add'l slop for size variance due to large itable offsets
  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, index_dependent_slop);

  return s;
}

int VtableStub::pd_code_alignment() {
  // UltraSPARC cache line size is 8 instructions:
  const unsigned int icache_line_size = 32;
  return icache_line_size;
}