src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp

--- old/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp

  30 #include "interp_masm_aarch64.hpp"
  31 #include "memory/resourceArea.hpp"
  32 #include "oops/compiledICHolder.hpp"
  33 #include "oops/instanceKlass.hpp"
  34 #include "oops/klassVtable.hpp"
  35 #include "runtime/sharedRuntime.hpp"
  36 #include "vmreg_aarch64.inline.hpp"
  37 #ifdef COMPILER2
  38 #include "opto/runtime.hpp"
  39 #endif
  40 
  41 // machine-dependent part of VtableStubs: create VtableStub of correct size and
  42 // initialize its code
  43 
  44 #define __ masm->
  45 
  46 #ifndef PRODUCT
  47 extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
  48 #endif
  49 
  50 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  51   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  52   const int stub_code_length = code_size_limit(true);
  53   VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
  54   // Can be NULL if there is no free space in the code cache.
  55   if (s == NULL) {
  56     return NULL;
  57   }
  58 
  59   // Count unused bytes in instruction sequences of variable size.
  60   // We add them to the computed buffer size in order to avoid
  61   // overflow in subsequently generated stubs.
  62   address   start_pc;
  63   int       slop_bytes = 0;
  64   int       slop_delta = 0;
  65 




  66   ResourceMark    rm;
  67   CodeBuffer      cb(s->entry_point(), stub_code_length);
  68   MacroAssembler* masm = new MacroAssembler(&cb);
  69 
  70 #if (!defined(PRODUCT) && defined(COMPILER2))
  71   if (CountCompiledCalls) {
  72     __ lea(r16, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
  73     __ incrementw(Address(r16));
  74   }
  75 #endif
  76 
  77   // get receiver (need to skip return address on top of stack)
  78   assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
  79 
  80   // get receiver klass
  81   address npe_addr = __ pc();
  82   __ load_klass(r16, j_rarg0);
  83 
  84 #ifndef PRODUCT
  85   if (DebugVtables) {


  99     const ptrdiff_t codesize = __ pc() - start_pc;
 100     slop_delta  = estimate - codesize;  // call_VM varies in length, depending on data
 101     slop_bytes += slop_delta;
 102     assert(slop_delta >= 0, "vtable #%d: Code size estimate (%d) for DebugVtables too small, required: %d", vtable_index, (int)estimate, (int)codesize);
 103 
 104     __ leave();
 105     __ bind(L);
 106   }
 107 #endif // PRODUCT
 108 
 109   start_pc = __ pc();
 110   __ lookup_virtual_method(r16, vtable_index, rmethod);
 111   slop_delta  = 8 - (int)(__ pc() - start_pc);
 112   slop_bytes += slop_delta;
 113   assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
 114 
 115 #ifndef PRODUCT
 116   if (DebugVtables) {
 117     Label L;
 118     __ cbz(rmethod, L);
 119     __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
 120     __ cbnz(rscratch1, L);
 121     __ stop("Vtable entry is NULL");
 122     __ bind(L);
 123   }
 124 #endif // PRODUCT
 125 
 126   // r0: receiver klass
 127   // rmethod: Method*
 128   // r2: receiver
 129   address ame_addr = __ pc();
 130   __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
 131   __ br(rscratch1);
 132 
 133   masm->flush();
 134   bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);

 135 
 136   return s;
 137 }
 138 
 139 
 140 VtableStub* VtableStubs::create_itable_stub(int itable_index) {
 141   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
 142   const int stub_code_length = code_size_limit(false);
 143   VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
 144   // Can be NULL if there is no free space in the code cache.
 145   if (s == NULL) {
 146     return NULL;
 147   }
 148   // Count unused bytes in instruction sequences of variable size.
 149   // We add them to the computed buffer size in order to avoid
 150   // overflow in subsequently generated stubs.
 151   address   start_pc;
 152   int       slop_bytes = 0;
 153   int       slop_delta = 0;
 154 




 155   ResourceMark    rm;
 156   CodeBuffer      cb(s->entry_point(), stub_code_length);
 157   MacroAssembler* masm = new MacroAssembler(&cb);
 158 
 159 #if (!defined(PRODUCT) && defined(COMPILER2))
 160   if (CountCompiledCalls) {
 161     __ lea(r10, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
 162     __ incrementw(Address(r10));
 163   }
 164 #endif
 165 
 166   // get receiver (need to skip return address on top of stack)
 167   assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
 168 
 169   // Entry arguments:
 170   //  rscratch2: CompiledICHolder
 171   //  j_rarg0: Receiver
 172 
 173   // Most registers are in use; we'll use r16, rmethod, r10, r11
 174   const Register recv_klass_reg     = r10;


 204   __ load_klass(recv_klass_reg, j_rarg0);   // restore recv_klass_reg
 205   __ lookup_interface_method(// inputs: rec. class, interface, itable index
 206                              recv_klass_reg, holder_klass_reg, itable_index,
 207                              // outputs: method, scan temp. reg
 208                              rmethod, temp_reg,
 209                              L_no_such_interface);
 210 
 211   const ptrdiff_t lookupSize = __ pc() - start_pc;
 212 
 213   // Reduce "estimate" such that "padding" does not drop below 8.
 214   const ptrdiff_t estimate = 152;
 215   const ptrdiff_t codesize = typecheckSize + lookupSize;
 216   slop_delta  = (int)(estimate - codesize);
 217   slop_bytes += slop_delta;
 218   assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);
 219 
 220 #ifdef ASSERT
 221   if (DebugVtables) {
 222     Label L2;
 223     __ cbz(rmethod, L2);
 224     __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
 225     __ cbnz(rscratch1, L2);
 226     __ stop("compiler entrypoint is null");
 227     __ bind(L2);
 228   }
 229 #endif // ASSERT
 230 
 231   // rmethod: Method*
 232   // j_rarg0: receiver
 233   address ame_addr = __ pc();
 234   __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
 235   __ br(rscratch1);
 236 
 237   __ bind(L_no_such_interface);
 238   // Handle IncompatibleClassChangeError in itable stubs.
 239   // More detailed error message.
 240   // We force resolving of the call site by jumping to the "handle
 241   // wrong method" stub, and so let the interpreter runtime do all the
 242   // dirty work.
 243   assert(SharedRuntime::get_handle_wrong_method_stub() != NULL, "check initialization order");
 244   __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 245 
 246   masm->flush();
 247   bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0);

 248 
 249   return s;
 250 }
 251 
 252 int VtableStub::pd_code_alignment() {
 253   // aarch64 cache line size is not an architected constant. We just align on 4 bytes (instruction size).
 254   const unsigned int icache_line_size = 4;
 255   return icache_line_size;
 256 }

+++ new/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp

  30 #include "interp_masm_aarch64.hpp"
  31 #include "memory/resourceArea.hpp"
  32 #include "oops/compiledICHolder.hpp"
  33 #include "oops/instanceKlass.hpp"
  34 #include "oops/klassVtable.hpp"
  35 #include "runtime/sharedRuntime.hpp"
  36 #include "vmreg_aarch64.inline.hpp"
  37 #ifdef COMPILER2
  38 #include "opto/runtime.hpp"
  39 #endif
  40 
  41 // machine-dependent part of VtableStubs: create VtableStub of correct size and
  42 // initialize its code
  43 
  44 #define __ masm->
  45 
  46 #ifndef PRODUCT
  47 extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
  48 #endif
  49 
  50 VtableStub* VtableStubs::create_vtable_stub(int vtable_index, bool caller_is_c1) {
  51   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  52   const int stub_code_length = code_size_limit(true);
  53   VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index, caller_is_c1);
  54   // Can be NULL if there is no free space in the code cache.
  55   if (s == NULL) {
  56     return NULL;
  57   }
  58 
  59   // Count unused bytes in instruction sequences of variable size.
  60   // We add them to the computed buffer size in order to avoid
  61   // overflow in subsequently generated stubs.
  62   address   start_pc;
  63   int       slop_bytes = 0;
  64   int       slop_delta = 0;
  65 
  66 // No variance was detected in vtable stub sizes. Setting index_dependent_slop == 0 will unveil any deviation from this observation.
  67   const int index_dependent_slop     = 0;
  68   ByteSize  entry_offset = caller_is_c1 ? Method::from_compiled_value_offset() :  Method::from_compiled_value_ro_offset();
  69 
  70   ResourceMark    rm;
  71   CodeBuffer      cb(s->entry_point(), stub_code_length);
  72   MacroAssembler* masm = new MacroAssembler(&cb);
  73 
  74 #if (!defined(PRODUCT) && defined(COMPILER2))
  75   if (CountCompiledCalls) {
  76     __ lea(r16, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
  77     __ incrementw(Address(r16));
  78   }
  79 #endif
  80 
  81   // get receiver (need to skip return address on top of stack)
  82   assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
  83 
  84   // get receiver klass
  85   address npe_addr = __ pc();
  86   __ load_klass(r16, j_rarg0);
  87 
  88 #ifndef PRODUCT
  89   if (DebugVtables) {


 103     const ptrdiff_t codesize = __ pc() - start_pc;
 104     slop_delta  = estimate - codesize;  // call_VM varies in length, depending on data
 105     slop_bytes += slop_delta;
 106     assert(slop_delta >= 0, "vtable #%d: Code size estimate (%d) for DebugVtables too small, required: %d", vtable_index, (int)estimate, (int)codesize);
 107 
 108     __ leave();
 109     __ bind(L);
 110   }
 111 #endif // PRODUCT
 112 
 113   start_pc = __ pc();
 114   __ lookup_virtual_method(r16, vtable_index, rmethod);
 115   slop_delta  = 8 - (int)(__ pc() - start_pc);
 116   slop_bytes += slop_delta;
 117   assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
 118 
 119 #ifndef PRODUCT
 120   if (DebugVtables) {
 121     Label L;
 122     __ cbz(rmethod, L);
 123     __ ldr(rscratch1, Address(rmethod, entry_offset));
 124     __ cbnz(rscratch1, L);
 125     __ stop("Vtable entry is NULL");
 126     __ bind(L);
 127   }
 128 #endif // PRODUCT
 129 
 130   // r0: receiver klass
 131   // rmethod: Method*
 132   // r2: receiver
 133   address ame_addr = __ pc();
 134   __ ldr(rscratch1, Address(rmethod, entry_offset));
 135   __ br(rscratch1);
 136 
 137   masm->flush();
 138   slop_bytes += index_dependent_slop; // add'l slop for size variance due to large itable offsets
 139   bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, index_dependent_slop);
 140 
 141   return s;
 142 }
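
The stub generators above and below share the same slop-accounting pattern: each variable-length instruction sequence is measured against a fixed byte estimate, an assert fires if the estimate ever turns out too small, and the unused bytes are accumulated into slop_bytes and reported to bookkeeping() so that buffer sizing for later stubs stays conservative. The following is a standalone sketch of that pattern, not HotSpot code; FakeEmitter and its byte counts are hypothetical stand-ins for the MacroAssembler sequences.

    // Sketch only: mirrors the estimate / slop_delta / slop_bytes bookkeeping.
    #include <cassert>
    #include <cstdio>

    struct FakeEmitter {
      int pc = 0;                           // bytes emitted so far
      void emit(int bytes) { pc += bytes; }
    };

    int main() {
      FakeEmitter masm;
      int slop_bytes = 0;                   // unused bytes, accumulated across sequences

      // A sequence estimated at 8 bytes that happens to emit only 4
      // (compare the "8 - (pc - start_pc)" check after lookup_virtual_method()).
      const int estimate = 8;
      const int start_pc = masm.pc;
      masm.emit(4);
      const int slop_delta = estimate - (masm.pc - start_pc);
      assert(slop_delta >= 0 && "estimate too small, enlarge it");
      slop_bytes += slop_delta;

      // The accumulated slop is handed to the shared bookkeeping code so that
      // sizing derived from this stub still covers longer variants.
      std::printf("emitted %d bytes, %d bytes of slop\n", masm.pc, slop_bytes);
      return 0;
    }
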
 143 
 144 
 145 VtableStub* VtableStubs::create_itable_stub(int itable_index, bool caller_is_c1) { 
 146   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
 147   const int stub_code_length = code_size_limit(false);
 148   VtableStub* s = new(stub_code_length) VtableStub(false, itable_index, caller_is_c1);
 149   // Can be NULL if there is no free space in the code cache.
 150   if (s == NULL) {
 151     return NULL;
 152   }
 153   // Count unused bytes in instruction sequences of variable size.
 154   // We add them to the computed buffer size in order to avoid
 155   // overflow in subsequently generated stubs.
 156   address   start_pc;
 157   int       slop_bytes = 0;
 158   int       slop_delta = 0;
 159 
 160   const int index_dependent_slop = (itable_index == 0) ? 4 :     // code size change with transition from 8-bit to 32-bit constant (@index == 16).
 161                                    (itable_index < 16) ? 3 : 0;  // index == 0 generates even shorter code.
 162   ByteSize  entry_offset = caller_is_c1 ? Method::from_compiled_value_offset() :  Method::from_compiled_value_ro_offset();
 163 
 164   ResourceMark    rm;
 165   CodeBuffer      cb(s->entry_point(), stub_code_length);
 166   MacroAssembler* masm = new MacroAssembler(&cb);
 167 
 168 #if (!defined(PRODUCT) && defined(COMPILER2))
 169   if (CountCompiledCalls) {
 170     __ lea(r10, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
 171     __ incrementw(Address(r10));
 172   }
 173 #endif
 174 
 175   // get receiver (need to skip return address on top of stack)
 176   assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
 177 
 178   // Entry arguments:
 179   //  rscratch2: CompiledICHolder
 180   //  j_rarg0: Receiver
 181 
 182   // Most registers are in use; we'll use r16, rmethod, r10, r11
 183   const Register recv_klass_reg     = r10;


 213   __ load_klass(recv_klass_reg, j_rarg0);   // restore recv_klass_reg
 214   __ lookup_interface_method(// inputs: rec. class, interface, itable index
 215                              recv_klass_reg, holder_klass_reg, itable_index,
 216                              // outputs: method, scan temp. reg
 217                              rmethod, temp_reg,
 218                              L_no_such_interface);
 219 
 220   const ptrdiff_t lookupSize = __ pc() - start_pc;
 221 
 222   // Reduce "estimate" such that "padding" does not drop below 8.
 223   const ptrdiff_t estimate = 152;
 224   const ptrdiff_t codesize = typecheckSize + lookupSize;
 225   slop_delta  = (int)(estimate - codesize);
 226   slop_bytes += slop_delta;
 227   assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);
 228 
 229 #ifdef ASSERT
 230   if (DebugVtables) {
 231     Label L2;
 232     __ cbz(rmethod, L2);
 233     __ ldr(rscratch1, Address(rmethod, entry_offset));
 234     __ cbnz(rscratch1, L2);
 235     __ stop("compiler entrypoint is null");
 236     __ bind(L2);
 237   }
 238 #endif // ASSERT
 239 
 240   // rmethod: Method*
 241   // j_rarg0: receiver
 242   address ame_addr = __ pc();
 243   __ ldr(rscratch1, Address(rmethod, entry_offset));
 244   __ br(rscratch1);
 245 
 246   __ bind(L_no_such_interface);
 247   // Handle IncompatibleClassChangeError in itable stubs.
 248   // More detailed error message.
 249   // We force resolving of the call site by jumping to the "handle
 250   // wrong method" stub, and so let the interpreter runtime do all the
 251   // dirty work.
 252   assert(SharedRuntime::get_handle_wrong_method_stub() != NULL, "check initialization order");
 253   __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 254 
 255   masm->flush();
 256   slop_bytes += index_dependent_slop; // add'l slop for size variance due to large itable offsets
 257   bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, index_dependent_slop);
 258 
 259   return s;
 260 }
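
Unlike the vtable case, the itable stub's code size varies with itable_index: per the comments on the index_dependent_slop initializer, the lookup sequence becomes longer once the index needs a wider constant encoding, so stubs generated with small indexes record a few extra slop bytes to keep the shared size estimate safe for later, larger-index stubs. A standalone sketch of that sizing rule follows; it is not HotSpot code, and main() exists only to print the table.

    // Sketch only: mirrors the index_dependent_slop ternary from the patch.
    #include <cstdio>
    #include <initializer_list>

    static int index_dependent_slop(int itable_index) {
      return (itable_index == 0) ? 4 :     // shortest possible lookup sequence
             (itable_index < 16) ? 3 : 0;  // wider constant encoding needed for larger indexes
    }

    int main() {
      for (int idx : {0, 1, 15, 16, 1000}) {
        std::printf("itable_index %4d -> extra slop %d bytes\n",
                    idx, index_dependent_slop(idx));
      }
      return 0;
    }
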
 261 
 262 int VtableStub::pd_code_alignment() {
 263   // aarch64 cache line size is not an architected constant. We just align on 4 bytes (instruction size).
 264   const unsigned int icache_line_size = 4;
 265   return icache_line_size;
 266 }
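
pd_code_alignment() returns 4 because, as the comment notes, the aarch64 cache line size is not an architected constant, so stubs are only aligned to the 4-byte instruction size. Below is a minimal sketch of how such an alignment value is typically consumed when placing a stub; align_up() here is a local helper for illustration, not the VtableStubs allocator.

    // Sketch only: round a hypothetical entry point up to the stub alignment.
    #include <cstdint>
    #include <cstdio>

    static std::uintptr_t align_up(std::uintptr_t p, std::uintptr_t alignment) {
      return (p + alignment - 1) & ~(alignment - 1);  // alignment must be a power of two
    }

    int main() {
      const std::uintptr_t alignment = 4;           // value from pd_code_alignment() above
      const std::uintptr_t raw       = 0x10000002;  // hypothetical unaligned address
      std::printf("%#zx -> %#zx\n", (std::size_t)raw, (std::size_t)align_up(raw, alignment));
      return 0;
    }
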