
src/hotspot/cpu/sparc/vtableStubs_sparc.cpp

rev 51381 : [mq]: 8207343.patch
   1 /*
   2  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "code/vtableStubs.hpp"
  28 #include "interp_masm_sparc.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "oops/compiledICHolder.hpp"
  31 #include "oops/instanceKlass.hpp"
  32 #include "oops/klassVtable.hpp"
  33 #include "runtime/sharedRuntime.hpp"
  34 #include "vmreg_sparc.inline.hpp"
  35 #ifdef COMPILER2
  36 #include "opto/runtime.hpp"
  37 #endif
  38 
  39 // machine-dependent part of VtableStubs: create vtableStub of correct size and
  40 // initialize its code
  41 
  42 #define __ masm->
  43 
  44 
  45 #ifndef PRODUCT
  46 extern "C" void bad_compiled_vtable_index(JavaThread* thread, oopDesc* receiver, int index);
  47 #endif
  48 
  49 
  50 // Used by compiler only; may use only caller saved, non-argument registers
  51 // NOTE:  %%%% if any change is made to this stub make sure that the function
  52 //             pd_code_size_limit is changed to ensure the correct size for VtableStub
  53 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  54   const int sparc_code_length = VtableStub::pd_code_size_limit(true);
  55   VtableStub* s = new(sparc_code_length) VtableStub(true, vtable_index);
  56   // Can be NULL if there is no free space in the code cache.
  57   if (s == NULL) {
  58     return NULL;
  59   }
  60 
  61   ResourceMark rm;
  62   CodeBuffer cb(s->entry_point(), sparc_code_length);
  63   MacroAssembler* masm = new MacroAssembler(&cb);
  64 
  65 #ifndef PRODUCT
  66   if (CountCompiledCalls) {
  67     __ inc_counter(SharedRuntime::nof_megamorphic_calls_addr(), G5, G3_scratch);
  68   }
  69 #endif /* PRODUCT */
  70 
  71   assert(VtableStub::receiver_location() == O0->as_VMReg(), "receiver expected in O0");
  72 
  73   // get receiver klass
  74   address npe_addr = __ pc();
  75   __ load_klass(O0, G3_scratch);
  76 
  77   // set Method* (in case of interpreted method), and destination address
  78 #ifndef PRODUCT
  79   if (DebugVtables) {
  80     Label L;
  81     // check offset vs vtable length
  82     __ ld(G3_scratch, in_bytes(Klass::vtable_length_offset()), G5);
  83     __ cmp_and_br_short(G5, vtable_index*vtableEntry::size(), Assembler::greaterUnsigned, Assembler::pt, L);
  84     __ set(vtable_index, O2);
  85     __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), O0, O2);
  86     __ bind(L);
  87   }
  88 #endif
  89 
  90   __ lookup_virtual_method(G3_scratch, vtable_index, G5_method);
  91 
  92 #ifndef PRODUCT
  93   if (DebugVtables) {
  94     Label L;
  95     __ br_notnull_short(G5_method, Assembler::pt, L);
  96     __ stop("Vtable entry is ZERO");
  97     __ bind(L);
  98   }
  99 #endif
 100 
 101   address ame_addr = __ pc();  // if the vtable entry is null, the method is abstract
 102                                // NOTE: for vtable dispatches, the vtable entry will never be null.
 103 
 104   __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3_scratch);
 105 
 106   // jump to target (either compiled code or c2iadapter)
 107   __ JMP(G3_scratch, 0);
 108   // load Method* (in case we call c2iadapter)
 109   __ delayed()->nop();
 110 
 111   masm->flush();
 112 
 113   if (PrintMiscellaneous && (WizardMode || Verbose)) {
 114     tty->print_cr("vtable #%d at " PTR_FORMAT "[%d] left over: %d",
 115                   vtable_index, p2i(s->entry_point()),
 116                   (int)(s->code_end() - s->entry_point()),
 117                   (int)(s->code_end() - __ pc()));
 118   }
 119   guarantee(__ pc() <= s->code_end(), "overflowed buffer");
 120   // shut the door on sizing bugs
 121   int slop = 2*BytesPerInstWord;  // 32-bit offset is this much larger than a 13-bit one
 122   assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for sethi;add");
 123 
 124   s->set_exception_points(npe_addr, ame_addr);
 125   return s;
 126 }
 127 
 128 
 129 // NOTE:  %%%% if any change is made to this stub make sure that the function
 130 //             pd_code_size_limit is changed to ensure the correct size for VtableStub
 131 VtableStub* VtableStubs::create_itable_stub(int itable_index) {
 132   const int sparc_code_length = VtableStub::pd_code_size_limit(false);
 133   VtableStub* s = new(sparc_code_length) VtableStub(false, itable_index);
 134   // Can be NULL if there is no free space in the code cache.
 135   if (s == NULL) {
 136     return NULL;
 137   }
 138 
 139   ResourceMark rm;
 140   CodeBuffer cb(s->entry_point(), sparc_code_length);
 141   MacroAssembler* masm = new MacroAssembler(&cb);
 142 
 143   Register G3_Klass = G3_scratch;
 144   Register G5_icholder = G5;  // Passed in as an argument
 145   Register G4_interface = G4_scratch;
 146   Label search;
 147 
 148   // Entry arguments:
 149   //  G5_interface: Interface
 150   //  O0:           Receiver
 151   assert(VtableStub::receiver_location() == O0->as_VMReg(), "receiver expected in O0");
 152 
 153   // get receiver klass (also an implicit null-check)
 154   address npe_addr = __ pc();
 155   __ load_klass(O0, G3_Klass);
 156 
 157   // Push a new window to get some temp registers.  This chops the head of all
 158   // my 64-bit %o registers in the LION build, but this is OK because no longs
 159   // are passed in the %o registers.  Instead, longs are passed in G1 and G4
 160   // and so those registers are not available here.
 161   __ save(SP,-frame::register_save_words*wordSize,SP);
 162 
 163 #ifndef PRODUCT
 164   if (CountCompiledCalls) {
 165     __ inc_counter(SharedRuntime::nof_megamorphic_calls_addr(), L0, L1);
 166   }
 167 #endif /* PRODUCT */
 168 
 169   Label L_no_such_interface;
 170 
 171   Register L5_method = L5;
 172 
 173   // Receiver subtype check against REFC.
 174   __ ld_ptr(G5_icholder, CompiledICHolder::holder_klass_offset(), G4_interface);
 175   __ lookup_interface_method(// inputs: rec. class, interface, itable index
 176                              G3_Klass, G4_interface, itable_index,
 177                              // outputs: scan temp. reg1, scan temp. reg2
 178                              L5_method, L2, L3,
 179                              L_no_such_interface,
 180                              /*return_method=*/ false);
 181 
 182   // Get Method* and entrypoint for compiler
 183   __ ld_ptr(G5_icholder, CompiledICHolder::holder_metadata_offset(), G4_interface);
 184   __ lookup_interface_method(// inputs: rec. class, interface, itable index
 185                              G3_Klass, G4_interface, itable_index,
 186                              // outputs: method, scan temp. reg
 187                              L5_method, L2, L3,
 188                              L_no_such_interface);
 189 
 190 #ifndef PRODUCT
 191   if (DebugVtables) {
 192     Label L01;
 193     __ br_notnull_short(L5_method, Assembler::pt, L01);
 194     __ stop("Method* is null");
 195     __ bind(L01);
 196   }
 197 #endif
 198 
 199   // If the following load is through a NULL pointer, we'll take an OS
 200   // exception that should translate into an AbstractMethodError.  We need the
 201   // window count to be correct at that time.
 202   __ restore(L5_method, 0, G5_method);
 203   // Restore registers *before* the AME point.
 204 
 205   address ame_addr = __ pc();   // if the vtable entry is null, the method is abstract
 206   __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3_scratch);
 207 
 208   // G5_method:  Method*
 209   // O0:         Receiver
 210   // G3_scratch: entry point
 211   __ JMP(G3_scratch, 0);
 212   __ delayed()->nop();
 213 
 214   __ bind(L_no_such_interface);
 215   // Handle IncompatibleClassChangeError in itable stubs.
 216   // More detailed error message.
 217   // We force resolving of the call site by jumping to the "handle
 218   // wrong method" stub, and so let the interpreter runtime do all the
 219   // dirty work.
 220   AddressLiteral icce(SharedRuntime::get_handle_wrong_method_stub());
 221   __ jump_to(icce, G3_scratch);
 222   __ delayed()->restore();
 223 
 224   masm->flush();
 225 
 226   if (PrintMiscellaneous && (WizardMode || Verbose)) {
 227     tty->print_cr("itable #%d at " PTR_FORMAT "[%d] left over: %d",
 228                   itable_index, p2i(s->entry_point()),
 229                   (int)(s->code_end() - s->entry_point()),
 230                   (int)(s->code_end() - __ pc()));
 231   }
 232   guarantee(__ pc() <= s->code_end(), "overflowed buffer");
 233   // shut the door on sizing bugs
 234   int slop = 2*BytesPerInstWord;  // 32-bit offset is this much larger than a 13-bit one
 235   assert(itable_index > 10 || __ pc() + slop <= s->code_end(), "room for sethi;add");
 236 
 237   s->set_exception_points(npe_addr, ame_addr);
 238   return s;
 239 }
 240 
 241 
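The two lookup_interface_method calls above implement the usual two-pass itable dispatch: the first pass only proves that the receiver implements the interface referenced at the call site (the REFC, loaded from CompiledICHolder::holder_klass_offset()), while the second pass finds the interface that actually declares the method (from holder_metadata_offset()) and fetches the Method* at itable_index. A minimal self-contained C++ model of that shape (types and names hypothetical, not HotSpot API):

    #include <cstddef>

    struct Klass;                         // opaque class/interface token
    struct ItableEntry {                  // one scan row: (interface, method-block start)
      const Klass* interface;             // NULL terminates the scan
      int          offset;                // index of this interface's method block
    };

    // Returns the method slot, or NULL to signal IncompatibleClassChangeError.
    const void* itable_dispatch(const ItableEntry* itable,
                                const void* const* method_blocks,
                                const Klass* refc,        // interface named at the call site
                                const Klass* declaring,   // interface declaring the method
                                int itable_index) {
      // pass 1: receiver subtype check against REFC; only the yes/no answer is used
      bool implements_refc = false;
      for (const ItableEntry* e = itable; e->interface != NULL; e++) {
        if (e->interface == refc) { implements_refc = true; break; }
      }
      if (!implements_refc) return NULL;                  // -> throw ICCE
      // pass 2: locate the declaring interface's method block and pick the slot
      for (const ItableEntry* e = itable; e->interface != NULL; e++) {
        if (e->interface == declaring) {
          return method_blocks[e->offset + itable_index]; // the Method* the stub jumps through
        }
      }
      return NULL;                                        // -> throw ICCE
    }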
 242 int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
 243   if (DebugVtables || CountCompiledCalls || VerifyOops) return 1000;
 244   else {
 245     const int slop = 2*BytesPerInstWord; // sethi;add  (needed for long offsets)
 246     if (is_vtable_stub) {
 247       // ld;ld;ld,jmp,nop
 248       const int basic = 5*BytesPerInstWord +
 249                         // shift;add for load_klass (only shift with zero heap based)
 250                         (UseCompressedClassPointers ?
 251                           MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
 252       return basic + slop;
 253     } else {
 254       const int basic = 54 * BytesPerInstWord +
 255                         // shift;add for load_klass (only shift with zero heap based)
 256                         (UseCompressedClassPointers ?
 257                           MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
 258       return (basic + slop);
 259     }
 260   }
 261 
 262   // In order to tune these parameters, run the JVM with VM options
 263   // +PrintMiscellaneous and +WizardMode to see information about
 264   // actual itable stubs.  Look for lines like this:
 265   //   itable #1 at 0x5551212[116] left over: 8
  266 // Reduce the constants so that the "left over" number is 8.
 267   // Do not aim at a left-over number of zero, because a very
 268   // large vtable or itable offset (> 4K) will require an extra
 269   // sethi/or pair of instructions.
 270   //
 271   // The JVM98 app. _202_jess has a megamorphic interface call.
 272   // The itable code looks like this:
 273   // Decoding VtableStub itbl[1]@16
 274   //   ld  [ %o0 + 4 ], %g3
 275   //   save  %sp, -64, %sp
 276   //   ld  [ %g3 + 0xe8 ], %l2
 277   //   sll  %l2, 2, %l2
 278   //   add  %l2, 0x134, %l2
 279   //   add  %g3, %l2, %l2
 280   //   add  %g3, 4, %g3
 281   //   ld  [ %l2 ], %l5
 282   //   brz,pn   %l5, throw_icce
 283   //   cmp  %l5, %g5
 284   //   be  %icc, success
 285   //   add  %l2, 8, %l2
 286   // loop:
 287   //   ld  [ %l2 ], %l5
 288   //   brz,pn   %l5, throw_icce
 289   //   cmp  %l5, %g5
 290   //   bne,pn   %icc, loop
 291   //   add  %l2, 8, %l2
 292   // success:
 293   //   ld  [ %l2 + -4 ], %l2
 294   //   ld  [ %g3 + %l2 ], %l5
 295   //   restore  %l5, 0, %g5
 296   //   ld  [ %g5 + 0x44 ], %g3
 297   //   jmp  %g3
 298   //   nop
 299   // throw_icce:
 300   //   sethi  %hi(throw_ICCE_entry), %g3
 301   //   ! 5 more instructions here, LP64_ONLY
 302   //   jmp  %g3 + %lo(throw_ICCE_entry)
 303   //   restore
 304 }
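Back-of-envelope numbers for the limits above (illustrative arithmetic, with BytesPerInstWord == 4): the vtable case without compressed class pointers is basic = 5*4 = 20 bytes plus slop = 8, i.e. a 28-byte limit; the itable case is basic = 54*4 = 216 plus 8 = 224 bytes. With UseCompressedClassPointers enabled, both grow by MacroAssembler::instr_size_for_decode_klass_not_null().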
 305 
 306 
 307 int VtableStub::pd_code_alignment() {
 308   // UltraSPARC cache line size is 8 instructions:
 309   const unsigned int icache_line_size = 32;
 310   return icache_line_size;
 311 }
   1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "code/vtableStubs.hpp"
  28 #include "interp_masm_sparc.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "oops/compiledICHolder.hpp"
  31 #include "oops/instanceKlass.hpp"
  32 #include "oops/klassVtable.hpp"
  33 #include "runtime/sharedRuntime.hpp"
  34 #include "vmreg_sparc.inline.hpp"
  35 #ifdef COMPILER2
  36 #include "opto/runtime.hpp"
  37 #endif
  38 
  39 // machine-dependent part of VtableStubs: create vtableStub of correct size and
  40 // initialize its code
  41 
  42 #define __ masm->
  43 
  44 #ifndef PRODUCT
  45 extern "C" void bad_compiled_vtable_index(JavaThread* thread, oopDesc* receiver, int index);
  46 #endif
  47 
  48 
  49 // Used by compiler only; may use only caller saved, non-argument registers
  50 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  51   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  52   const int stub_code_length = VtableStub::code_size_limit(true);
  53   VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
  54   // Can be NULL if there is no free space in the code cache.
  55   if (s == NULL) {
  56     return NULL;
  57   }
  58 
  59   // Count unused bytes in instruction sequences of variable size.
  60   // We add them to the computed buffer size in order to avoid
  61   // overflow in subsequently generated stubs.
  62   address   start_pc;
  63   int       slop_bytes = 0;
  64   int       slop_delta = 0;
  65   const int slop32     = ((vtable_index < 512) ? 2 : 0)*BytesPerInstWord; // code size change at the 13-bit to 32-bit constant transition; crossover near index 512 (see the note after this function).
  66 
  67   ResourceMark    rm;
  68   CodeBuffer      cb(s->entry_point(), stub_code_length);
  69   MacroAssembler* masm = new MacroAssembler(&cb);
  70 
  71 #if (!defined(PRODUCT) && defined(COMPILER2))
  72   if (CountCompiledCalls) {
  73     __ inc_counter(SharedRuntime::nof_megamorphic_calls_addr(), G5, G3_scratch);
  74   }
  75 #endif // PRODUCT
  76 
  77   assert(VtableStub::receiver_location() == O0->as_VMReg(), "receiver expected in O0");
  78 
  79   // get receiver klass
  80   address npe_addr = __ pc();
  81   __ load_klass(O0, G3_scratch);
  82 
  83 #ifndef PRODUCT
  84   if (DebugVtables) {
  85     Label L;
  86     // check offset vs vtable length
  87     __ ld(G3_scratch, in_bytes(Klass::vtable_length_offset()), G5);
  88     __ cmp_and_br_short(G5, vtable_index*vtableEntry::size(), Assembler::greaterUnsigned, Assembler::pt, L);
  89 
  90     // set generates 8 instructions (worst case), 1 instruction (best case)
  91     start_pc = __ pc();
  92     __ set(vtable_index, O2);
  93     slop_delta  = __ worst_case_insts_for_set()*BytesPerInstWord - (__ pc() - start_pc);
  94     slop_bytes += slop_delta;
  95     assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
  96 
  97     // there is no variance in call_VM() emitted code.
  98     __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), O0, O2);
  99     __ bind(L);
 100   }
 101 #endif
 102 
 103   // set Method* (in case of interpreted method), and destination address
 104   start_pc = __ pc();
 105   __ lookup_virtual_method(G3_scratch, vtable_index, G5_method);
 106   // lookup_virtual_method generates 3 instructions (worst case), 1 instruction (best case)
 107   slop_delta  = 3*BytesPerInstWord - (int)(__ pc() - start_pc);
 108   slop_bytes += slop_delta;
 109   assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
 110 
 111 #ifndef PRODUCT
 112   if (DebugVtables) {
 113     Label L;
 114     __ br_notnull_short(G5_method, Assembler::pt, L);
 115     __ stop("Vtable entry is ZERO");
 116     __ bind(L);
 117   }
 118 #endif
 119 
 120   address ame_addr = __ pc();  // if the vtable entry is null, the method is abstract
 121                                // NOTE: for vtable dispatches, the vtable entry will never be null.
 122 
 123   __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3_scratch);
 124 
 125   // jump to target (either compiled code or c2iadapter)
 126   __ JMP(G3_scratch, 0);
 127   // load Method* (in case we call c2iadapter)
 128   __ delayed()->nop();
 129 
 130   masm->flush();
 131   slop_bytes += slop32; // add'l slop for size variance due to large vtable offsets
 132   bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, slop32);
 133 
 134   return s;
 135 }
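A note on slop32: a SPARC simm13 immediate spans [-4096, 4095], and vtable slots are wordSize (8) bytes apart, so a slot offset first leaves the 13-bit range near index 512 (512 * 8 = 4096); the exact crossover is slightly lower once the vtable base offset inside Klass is added, hence "near" index 512 above. A minimal sketch of the predicate (helper name hypothetical; the offset and Assembler helpers are assumed to be the usual HotSpot SPARC ones):

    // Illustration only: does this vtable slot force a sethi;add (2 extra instructions)?
    static bool slot_offset_needs_wide_constant(int vtable_index) {
      const int offset = in_bytes(Klass::vtable_start_offset())
                       + vtable_index * vtableEntry::size_in_bytes();
      return !Assembler::is_simm13(offset);   // true from roughly index 512 upward
    }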
 136 
 137 
 138 VtableStub* VtableStubs::create_itable_stub(int itable_index) {
 139   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
 140   const int stub_code_length = VtableStub::code_size_limit(false);
 141   VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
 142   // Can be NULL if there is no free space in the code cache.
 143   if (s == NULL) {
 144     return NULL;
 145   }
 146   // Count unused bytes in instruction sequences of variable size.
 147   // We add them to the computed buffer size in order to avoid
 148   // overflow in subsequently generated stubs.
 149   address   start_pc;
 150   int       slop_bytes = 0;
 151   int       slop_delta = 0;
 152   const int slop32     = ((itable_index < 512) ? 2 : 0)*BytesPerInstWord; // code size change at the 13-bit to 32-bit constant transition; crossover near index 512 (see the note after create_vtable_stub).
 153 
 154   ResourceMark    rm;
 155   CodeBuffer      cb(s->entry_point(), stub_code_length);
 156   MacroAssembler* masm = new MacroAssembler(&cb);
 157 
 158 #if (!defined(PRODUCT) && defined(COMPILER2))
 159   if (CountCompiledCalls) {
 160     // Use G3_scratch, G4_scratch as work regs for inc_counter.
 161     // Their aliases (G3_Klass, G4_interface) are only declared further down, so both registers are free here.
 162     __ inc_counter(SharedRuntime::nof_megamorphic_calls_addr(), G3_scratch, G4_scratch);
 163   }
 164 #endif // PRODUCT
 165 
 166   Register G3_Klass = G3_scratch;
 167   Register G5_icholder = G5;  // Passed in as an argument
 168   Register G4_interface = G4_scratch;
 169   Label search;
 170 
 171   // Entry arguments:
 172   //  G5_interface: Interface
 173   //  O0:           Receiver
 174   assert(VtableStub::receiver_location() == O0->as_VMReg(), "receiver expected in O0");
 175 
 176   // get receiver klass (also an implicit null-check)
 177   address npe_addr = __ pc();
 178   __ load_klass(O0, G3_Klass);
 179 
 180   // Push a new window to get some temp registers.  This chops the head of all
 181   // my 64-bit %o registers in the LION build, but this is OK because no longs
 182   // are passed in the %o registers.  Instead, longs are passed in G1 and G4
 183   // and so those registers are not available here.
 184   __ save(SP,-frame::register_save_words*wordSize,SP);
 185 
 186   Label    L_no_such_interface;
 187   Register L5_method = L5;
 188 
 189   start_pc = __ pc();
 190 
 191   // Receiver subtype check against REFC.
 192   __ ld_ptr(G5_icholder, CompiledICHolder::holder_klass_offset(), G4_interface);
 193   __ lookup_interface_method(// inputs: rec. class, interface, itable index
 194                              G3_Klass, G4_interface, itable_index,
 195                              // outputs: scan temp. reg1, scan temp. reg2
 196                              L5_method, L2, L3,
 197                              L_no_such_interface,
 198                              /*return_method=*/ false);
 199 
 200   const ptrdiff_t typecheckSize = __ pc() - start_pc;
 201   start_pc = __ pc();
 202 
 203   // Get Method* and entrypoint for compiler
 204   __ ld_ptr(G5_icholder, CompiledICHolder::holder_metadata_offset(), G4_interface);
 205   __ lookup_interface_method(// inputs: rec. class, interface, itable index
 206                              G3_Klass, G4_interface, itable_index,
 207                              // outputs: method, scan temp. reg
 208                              L5_method, L2, L3,
 209                              L_no_such_interface);
 210 
 211   const ptrdiff_t lookupSize = __ pc() - start_pc;
 212 
 213   // Reduce "estimate" such that "padding" does not drop below 8.
 214   // Do not target a left-over number of zero, because a very
 215   // large vtable or itable offset (> 4K) will require an extra
 216   // sethi/or pair of instructions.
 217   // Found typecheck(60) + lookup(72) to exceed the previous estimate (32*4 = 128).
 218   const ptrdiff_t estimate = 36*BytesPerInstWord;
 219   const ptrdiff_t codesize = typecheckSize + lookupSize + slop32;
 220   slop_delta  = (int)(estimate - codesize);
 221   slop_bytes += slop_delta;
 222   assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);
 223 
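    // Worked check of the numbers above (illustration, not from the source):
    // typecheck(60) + lookup(72) = 132 bytes, which exceeds the previous estimate
    // of 32*4 = 128 bytes; the new estimate of 36*4 = 144 still covers
    // codesize = 132 + slop32(8) = 140 for small itable indices.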
 224 #ifndef PRODUCT
 225   if (DebugVtables) {
 226     Label L01;
 227     __ br_notnull_short(L5_method, Assembler::pt, L01);
 228     __ stop("Method* is null");
 229     __ bind(L01);
 230   }
 231 #endif
 232 
 233   // If the following load is through a NULL pointer, we'll take an OS
 234   // exception that should translate into an AbstractMethodError.  We need the
 235   // window count to be correct at that time.
 236   __ restore(L5_method, 0, G5_method);
 237   // Restore registers *before* the AME point.
 238 
 239   address ame_addr = __ pc();   // if the vtable entry is null, the method is abstract
 240   __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3_scratch);
 241 
 242   // G5_method:  Method*
 243   // O0:         Receiver
 244   // G3_scratch: entry point
 245   __ JMP(G3_scratch, 0);
 246   __ delayed()->nop();
 247 
 248   __ bind(L_no_such_interface);
 249   // Handle IncompatibleClassChangeError in itable stubs.
 250   // More detailed error message.
 251   // We force resolving of the call site by jumping to the "handle
 252   // wrong method" stub, and so let the interpreter runtime do all the
 253   // dirty work.
 254   AddressLiteral icce(SharedRuntime::get_handle_wrong_method_stub());
 255   __ jump_to(icce, G3_scratch);
 256   __ delayed()->restore();
 257 
 258   masm->flush();
 259   slop_bytes += slop32; // add'l slop for size variance due to large itable offsets
 260   bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, slop32);
 261 
 262   return s;
 263 }
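Both generators now delegate their epilogue to VtableStubs::bookkeeping(), a shared helper (see "A word on VtableStub sizing" in share/code/vtableStubs.hpp). A hedged sketch of what it consolidates, inferred from the two call sites above and from the epilogues of the old version (the real body may differ):

    // Sketch only; parameter names inferred from the call sites above.
    void VtableStubs::bookkeeping(MacroAssembler* masm, outputStream* out, VtableStub* s,
                                  address npe_addr, address ame_addr, bool is_vtable_stub,
                                  int index, int slop_bytes, int index_dependent_slop) {
      // slop_bytes/index_dependent_slop feed the shared size accounting (not modeled here).
      if (PrintMiscellaneous && (WizardMode || Verbose)) {
        out->print_cr("%s #%d at " PTR_FORMAT "[%d] left over: %d",
                      is_vtable_stub ? "vtable" : "itable", index,
                      p2i(s->entry_point()),
                      (int)(s->code_end() - s->entry_point()),
                      (int)(s->code_end() - masm->pc()));
      }
      guarantee(masm->pc() <= s->code_end(), "overflowed buffer");
      s->set_exception_points(npe_addr, ame_addr);
    }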
 264 
 265 int VtableStub::pd_code_alignment() {
 266   // UltraSPARC cache line size is 8 instructions:
 267   const unsigned int icache_line_size = 32;
 268   return icache_line_size;
 269 }
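(For the arithmetic: 8 instructions * BytesPerInstWord = 8 * 4 = 32 bytes, so every stub entry point begins on an UltraSPARC I-cache line.)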