/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_sparc.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_sparc.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// machine-dependent part of VtableStubs: create vtableStub of correct size and
// initialize its code

#define __ masm->


#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oopDesc* receiver, int index);
#endif


// Used by compiler only; may use only caller saved, non-argument registers
// NOTE:  %%%% if any change is made to this stub make sure that the function
//             pd_code_size_limit is changed to ensure the correct size for VtableStub
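//
// The emitted stub is, schematically (an illustrative sketch only; the exact
// sequence depends on UseCompressedOops and on whether the vtable offset fits
// in a 13-bit immediate; offsets are shown symbolically):
//   ld_ptr  [ %o0 + klass_offset ], %g3     ! load receiver klass (NPE point)
//   ld_ptr  [ %g3 + vtable_offset ], %g5    ! load methodOop from the vtable
//   ld_ptr  [ %g5 + from_compiled ], %g3    ! load entry point (AME point)
//   jmp     %g3
//   nop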
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  const int sparc_code_length = VtableStub::pd_code_size_limit(true);
  VtableStub* s = new(sparc_code_length) VtableStub(true, vtable_index);
  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), sparc_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#ifndef PRODUCT
  if (CountCompiledCalls) {
    __ inc_counter(SharedRuntime::nof_megamorphic_calls_addr(), G5, G3_scratch);
  }
#endif /* PRODUCT */

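  // Entry arguments:
  //  O0: Receiver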
  assert(VtableStub::receiver_location() == O0->as_VMReg(), "receiver expected in O0");

  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();
  __ load_klass(O0, G3_scratch);

  // set methodOop (in case of interpreted method), and destination address
  int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // check offset vs vtable length
    __ ld(G3_scratch, instanceKlass::vtable_length_offset()*wordSize, G5);
    __ cmp(G5, vtable_index*vtableEntry::size());
    __ br(Assembler::greaterUnsigned, false, Assembler::pt, L);
    __ delayed()->nop();
    __ set(vtable_index, O2);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), O0, O2);
    __ bind(L);
  }
#endif
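  // The byte offset of the selected vtable slot from the klass is
  //   v_off = (vtable_start_offset() + vtable_index*vtableEntry::size())*wordSize
  //           + vtableEntry::method_offset_in_bytes()
  // For typical vtable indices this fits in a 13-bit signed immediate, so a
  // single displacement load suffices; otherwise the offset is materialized
  // into G5 first (the sethi;add that the slop allowance below accounts for).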
  int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
  if (__ is_simm13(v_off)) {
    __ ld_ptr(G3, v_off, G5_method);
  } else {
    __ set(v_off, G5);
    __ ld_ptr(G3, G5, G5_method);
  }

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    __ br_notnull(G5_method, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("Vtable entry is ZERO");
    __ bind(L);
  }
#endif

  address ame_addr = __ pc();  // if the vtable entry is null, the method is abstract
                               // NOTE: for vtable dispatches, the vtable entry will never be null.

  __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3_scratch);

  // jump to target (either compiled code or c2i adapter)
  __ JMP(G3_scratch, 0);
  // nothing useful to put in the delay slot: G5_method already holds the
  // methodOop that a c2i adapter expects
  __ delayed()->nop();

  masm->flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("vtable #%d at " PTR_FORMAT "[%d] left over: %d",
                  vtable_index, s->entry_point(),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // shut the door on sizing bugs
  int slop = 2*BytesPerInstWord;  // 32-bit offset is this much larger than a 13-bit one
  assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for sethi;add");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}


// NOTE:  %%%% if any change is made to this stub make sure that the function
//             pd_code_size_limit is changed to ensure the correct size for VtableStub
VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  const int sparc_code_length = VtableStub::pd_code_size_limit(false);
  VtableStub* s = new(sparc_code_length) VtableStub(false, itable_index);
  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), sparc_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

  Register G3_klassOop = G3_scratch;
  Register G5_interface = G5;  // Passed in as an argument
  Label search;

  // Entry arguments:
  //  G5_interface: Interface
  //  O0:           Receiver
  assert(VtableStub::receiver_location() == O0->as_VMReg(), "receiver expected in O0");

  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();
  __ load_klass(O0, G3_klassOop);
  __ verify_oop(G3_klassOop);

  // Push a new window to get some temp registers.  This chops the head of all
  // my 64-bit %o registers in the LION build, but this is OK because no longs
  // are passed in the %o registers.  Instead, longs are passed in G1 and G4
  // and so those registers are not available here.
  __ save(SP, -frame::register_save_words*wordSize, SP);
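  // From here on the stub runs in its own register window, so the %l
  // registers are free as temporaries.  Every exit path pops the window
  // again: the restore before the AME point below, and the restore in the
  // delay slot of the jump to throw_icce.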

#ifndef PRODUCT
  if (CountCompiledCalls) {
    __ inc_counter(SharedRuntime::nof_megamorphic_calls_addr(), L0, L1);
  }
#endif /* PRODUCT */

  Label throw_icce;

  Register L5_method = L5;
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             G3_klassOop, G5_interface, itable_index,
                             // outputs: method, scan temp. reg
                             L5_method, L2, L3,
                             throw_icce);
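  // L5_method now holds the methodOop found in the itable; if the receiver
  // class does not implement G5_interface, the generated scan loop branches
  // to throw_icce instead.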

#ifndef PRODUCT
  if (DebugVtables) {
    Label L01;
    __ bpr(Assembler::rc_nz, false, Assembler::pt, L5_method, L01);
    __ delayed()->nop();
    __ stop("methodOop is null");
    __ bind(L01);
    __ verify_oop(L5_method);
  }
#endif

  // If the following load is through a NULL pointer, we'll take an OS
  // exception that should translate into an AbstractMethodError.  We need the
  // window count to be correct at that time.
  __ restore(L5_method, 0, G5_method);
  // Restore registers *before* the AME point.

  address ame_addr = __ pc();   // if the itable entry is null, the method is abstract
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3_scratch);

  // G5_method:  methodOop
  // O0:         Receiver
  // G3_scratch: entry point
  __ JMP(G3_scratch, 0);
  __ delayed()->nop();

  __ bind(throw_icce);
  AddressLiteral icce(StubRoutines::throw_IncompatibleClassChangeError_entry());
  __ jump_to(icce, G3_scratch);
  __ delayed()->restore();
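  // The restore in the delay slot pops the register window pushed by the save
  // above before entering the shared ICCE-throwing stub, mirroring the
  // restore on the normal dispatch path.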

  masm->flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("itable #%d at " PTR_FORMAT "[%d] left over: %d",
                  itable_index, s->entry_point(),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // shut the door on sizing bugs
  int slop = 2*BytesPerInstWord;  // 32-bit offset is this much larger than a 13-bit one
  assert(itable_index > 10 || __ pc() + slop <= s->code_end(), "room for sethi;add");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}


int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
  if (TraceJumps || DebugVtables || CountCompiledCalls || VerifyOops) return 1000;
  else {
    const int slop = 2*BytesPerInstWord; // sethi;add  (needed for long offsets)
    if (is_vtable_stub) {
      // ld;ld;ld,jmp,nop
      const int basic = 5*BytesPerInstWord +
                        // shift;add for load_klass (only shift with zero heap based)
                        (UseCompressedOops ?
                         ((Universe::narrow_oop_base() == NULL) ? BytesPerInstWord : 2*BytesPerInstWord) : 0);
      return basic + slop;
    } else {
      const int basic = (28 LP64_ONLY(+ 6)) * BytesPerInstWord +
                        // shift;add for load_klass (only shift with zero heap based)
                        (UseCompressedOops ?
                         ((Universe::narrow_oop_base() == NULL) ? BytesPerInstWord : 2*BytesPerInstWord) : 0);
      return (basic + slop);
    }
  }
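  // Illustrative arithmetic only: with 4-byte SPARC instructions and
  // UseCompressedOops off, the vtable stub limit works out to
  // 5*4 + 2*4 = 28 bytes; compressed oops add one more instruction (shift)
  // with a zero narrow-oop base, or two (shift;add) otherwise.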

  // In order to tune these parameters, run the JVM with VM options
  // +PrintMiscellaneous and +WizardMode to see information about
  // actual itable stubs.  Look for lines like this:
  //   itable #1 at 0x5551212[116] left over: 8
  // Reduce the constants so that the "left over" number is 8.
  // Do not aim at a left-over number of zero, because a very
  // large vtable or itable offset (> 4K) will require an extra
  // sethi/or pair of instructions.
  //
  // The JVM98 app. _202_jess has a megamorphic interface call.
  // The itable code looks like this:
  // Decoding VtableStub itbl[1]@16
  //   ld  [ %o0 + 4 ], %g3
  //   save  %sp, -64, %sp
  //   ld  [ %g3 + 0xe8 ], %l2
  //   sll  %l2, 2, %l2
  //   add  %l2, 0x134, %l2
  //   and  %l2, -8, %l2        ! NOT_LP64 only
  //   add  %g3, %l2, %l2
  //   add  %g3, 4, %g3
  //   ld  [ %l2 ], %l5
  //   brz,pn   %l5, throw_icce
  //   cmp  %l5, %g5
  //   be  %icc, success
  //   add  %l2, 8, %l2
  // loop:
  //   ld  [ %l2 ], %l5
  //   brz,pn   %l5, throw_icce
  //   cmp  %l5, %g5
  //   bne,pn   %icc, loop
  //   add  %l2, 8, %l2
  // success:
  //   ld  [ %l2 + -4 ], %l2
  //   ld  [ %g3 + %l2 ], %l5
  //   restore  %l5, 0, %g5
  //   ld  [ %g5 + 0x44 ], %g3
  //   jmp  %g3
  //   nop
  // throw_icce:
  //   sethi  %hi(throw_ICCE_entry), %g3
  //   ! 5 more instructions here, LP64_ONLY
  //   jmp  %g3 + %lo(throw_ICCE_entry)
  //   restore
}


int VtableStub::pd_code_alignment() {
  // UltraSPARC cache line size is 8 instructions:
  const unsigned int icache_line_size = 32;
  return icache_line_size;
}