src/hotspot/cpu/x86/vtableStubs_x86_32.cpp

rev 51258 : [mq]: 8207343.patch
   1 /*
   2  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  38 
  39 // machine-dependent part of VtableStubs: create VtableStub of correct size and
  40 // initialize its code
  41 
  42 #define __ masm->
  43 
  44 #ifndef PRODUCT
  45 extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
  46 #endif
  47 
  48 // These stubs are used by the compiler only.
  49 // Argument registers, which must be preserved:
  50 //   rcx - receiver (always first argument)
  51 //   rdx - second argument (if any)
  52 // Other registers that might be usable:
  53 //   rax - inline cache register (is interface for itable stub)
  54 //   rbx - method (used when calling out to interpreter)
  55 // Available now, but may become callee-save at some point:
  56 //   rsi, rdi
  57 // Note that rax and rdx are also used for return values.
  58 //
  59 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  60   const int i486_code_length = VtableStub::pd_code_size_limit(true);
  61   VtableStub* s = new(i486_code_length) VtableStub(true, vtable_index);
  62   // Can be NULL if there is no free space in the code cache.
  63   if (s == NULL) {
  64     return NULL;
  65   }
  66 
  67   ResourceMark rm;
  68   CodeBuffer cb(s->entry_point(), i486_code_length);
  69   MacroAssembler* masm = new MacroAssembler(&cb);
  70 
  71 #ifndef PRODUCT
  72 
  73   if (CountCompiledCalls) {
  74     __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
  75   }
  76 #endif /* PRODUCT */
  77 
  78   // get receiver (need to skip return address on top of stack)
  79   assert(VtableStub::receiver_location() == rcx->as_VMReg(), "receiver expected in rcx");
  80 
  81   // get receiver klass
  82   address npe_addr = __ pc();
  83   __ movptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes()));
  84 
  85 #ifndef PRODUCT
  86   if (DebugVtables) {
  87     Label L;
  88     // check offset vs vtable length
  89     __ cmpl(Address(rax, Klass::vtable_length_offset()), vtable_index*vtableEntry::size());
  90     __ jcc(Assembler::greater, L);
  91     __ movl(rbx, vtable_index);
  92     __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), rcx, rbx);
  93     __ bind(L);
  94   }
  95 #endif // PRODUCT
  96 
  97   const Register method = rbx;
  98 
  99   // load Method* and target address
 100   __ lookup_virtual_method(rax, vtable_index, method);
 101 
 102   if (DebugVtables) {
 103     Label L;
 104     __ cmpptr(method, (int32_t)NULL_WORD);
 105     __ jcc(Assembler::equal, L);
 106     __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD);
 107     __ jcc(Assembler::notZero, L);
 108     __ stop("Vtable entry is NULL");
 109     __ bind(L);
 110   }
 111 
 112   // rax,: receiver klass
 113   // method (rbx): Method*
 114   // rcx: receiver
 115   address ame_addr = __ pc();
 116   __ jmp( Address(method, Method::from_compiled_offset()));
 117 
 118   masm->flush();
 119 
 120   if (PrintMiscellaneous && (WizardMode || Verbose)) {
 121     tty->print_cr("vtable #%d at " PTR_FORMAT "[%d] left over: %d",
 122                   vtable_index, p2i(s->entry_point()),
 123                   (int)(s->code_end() - s->entry_point()),
 124                   (int)(s->code_end() - __ pc()));
 125   }
 126   guarantee(__ pc() <= s->code_end(), "overflowed buffer");
 127   // shut the door on sizing bugs
 128   int slop = 3;  // 32-bit offset is this much larger than an 8-bit one
 129   assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");
 130 
 131   s->set_exception_points(npe_addr, ame_addr);
 132   return s;
 133 }
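
Note on the "slop" guard at old lines 127-129 above: on IA-32, a memory operand with a
32-bit displacement is exactly 3 bytes longer than the same operand with an 8-bit
displacement (4 displacement bytes instead of 1). A minimal encoding example, independent
of this file (AT&T syntax, matching the disassembly quoted further down):

    mov 0x10(%eax),%ebx     # 8b 58 10             3 bytes (disp8)
    mov 0x130(%eax),%ebx    # 8b 98 30 01 00 00    6 bytes (disp32)

Since a small vtable_index (<= 10) is emitted with the short form, the assert additionally
demands 3 spare bytes, so the size estimate also holds for an index large enough to need
the 32-bit form.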
 134 
 135 
 136 VtableStub* VtableStubs::create_itable_stub(int itable_index) {
 137   // Note well: pd_code_size_limit is the absolute minimum we can get away with.  If you
 138   //            add code here, bump the code stub size returned by pd_code_size_limit!
 139   const int i486_code_length = VtableStub::pd_code_size_limit(false);
 140   VtableStub* s = new(i486_code_length) VtableStub(false, itable_index);
 141   // Can be NULL if there is no free space in the code cache.
 142   if (s == NULL) {
 143     return NULL;
 144   }
 145 
 146   ResourceMark rm;
 147   CodeBuffer cb(s->entry_point(), i486_code_length);
 148   MacroAssembler* masm = new MacroAssembler(&cb);
 149 
 150   // Entry arguments:
 151   //  rax: CompiledICHolder
 152   //  rcx: Receiver
 153 
 154 #ifndef PRODUCT
 155   if (CountCompiledCalls) {
 156     __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
 157   }
 158 #endif /* PRODUCT */
 159 
 160   // Most registers are in use; we'll use rax, rbx, rsi, rdi
 161   // (If we need to make rsi, rdi callee-save, do a push/pop here.)
 162   const Register recv_klass_reg     = rsi;
 163   const Register holder_klass_reg   = rax; // declaring interface klass (DECC)
 164   const Register resolved_klass_reg = rbx; // resolved interface klass (REFC)
 165   const Register temp_reg           = rdi;
 166 
 167   const Register icholder_reg = rax;
 168   __ movptr(resolved_klass_reg, Address(icholder_reg, CompiledICHolder::holder_klass_offset()));
 169   __ movptr(holder_klass_reg,   Address(icholder_reg, CompiledICHolder::holder_metadata_offset()));
 170 
 171   Label L_no_such_interface;
 172 
 173   // get receiver klass (also an implicit null-check)
 174   address npe_addr = __ pc();
 175   assert(VtableStub::receiver_location() ==  rcx->as_VMReg(), "receiver expected in  rcx");
 176   __ load_klass(recv_klass_reg, rcx);
 177 
 178   // Receiver subtype check against REFC.
 179   // Destroys recv_klass_reg value.
 180   __ lookup_interface_method(// inputs: rec. class, interface
 181                              recv_klass_reg, resolved_klass_reg, noreg,
 182                              // outputs:  scan temp. reg1, scan temp. reg2
 183                              recv_klass_reg, temp_reg,
 184                              L_no_such_interface,
 185                              /*return_method=*/false);
 186 
 187   // Get selected method from declaring class and itable index
 188   const Register method = rbx;
 189   __ load_klass(recv_klass_reg, rcx); // restore recv_klass_reg
 190   __ lookup_interface_method(// inputs: rec. class, interface, itable index
 191                              recv_klass_reg, holder_klass_reg, itable_index,
 192                              // outputs: method, scan temp. reg
 193                              method, temp_reg,
 194                              L_no_such_interface);
 195 
 196   // method (rbx): Method*
 197   // rcx: receiver
 198 
 199 #ifdef ASSERT
 200   if (DebugVtables) {
 201       Label L1;
 202       __ cmpptr(method, (int32_t)NULL_WORD);
 203       __ jcc(Assembler::equal, L1);
 204       __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD);
 205       __ jcc(Assembler::notZero, L1);
 206       __ stop("Method* is null");
 207       __ bind(L1);
 208     }
 209 #endif // ASSERT
 210 
 211   address ame_addr = __ pc();
 212   __ jmp(Address(method, Method::from_compiled_offset()));
 213 
 214   __ bind(L_no_such_interface);
 215   // Handle IncompatibleClassChangeError in itable stubs.
 216   // More detailed error message.
 217   // We force resolving of the call site by jumping to the "handle
 218   // wrong method" stub, and so let the interpreter runtime do all the
 219   // dirty work.
 220   __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 221 
 222   __ flush();
 223 
 224   if (PrintMiscellaneous && (WizardMode || Verbose)) {
 225     tty->print_cr("itable #%d at " PTR_FORMAT "[%d] left over: %d",
 226                   itable_index, p2i(s->entry_point()),
 227                   (int)(s->code_end() - s->entry_point()),
 228                   (int)(s->code_end() - __ pc()));
 229   }
 230   guarantee(__ pc() <= s->code_end(), "overflowed buffer");
 231   // shut the door on sizing bugs
 232   int slop = 3;  // 32-bit offset is this much larger than an 8-bit one
 233   assert(itable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");
 234 
 235   s->set_exception_points(npe_addr, ame_addr);
 236   return s;
 237 }
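
The two lookup_interface_method calls above perform the two logical halves of an
invokeinterface dispatch: the first (return_method=false) only proves the receiver
implements the resolved interface (REFC), the second locates the Method* in the itable
slice of the declaring interface (DECC). The sketch below shows, with simplified and
purely hypothetical types, what that linear itable scan computes; the real entry layout
is defined elsewhere in HotSpot, not in this file.

    // Hypothetical, simplified types -- for illustration only, not HotSpot's itable API.
    struct Method;                          // opaque
    struct ItableOffsetEntry {
      const void* interface_klass;          // NULL terminates the scan
      int         method_table_offset;      // byte offset of this interface's Method* block
    };

    // Mirrors the scan loop visible in the AT&T disassembly quoted in pd_code_size_limit below.
    static Method* itable_lookup_sketch(const char* recv_klass_base,
                                        const ItableOffsetEntry* first_entry,
                                        const void* wanted_interface,
                                        int itable_index) {
      for (const ItableOffsetEntry* e = first_entry; ; e++) {
        if (e->interface_klass == NULL) {
          return NULL;                      // no such interface -> IncompatibleClassChangeError path
        }
        if (e->interface_klass == wanted_interface) {
          Method* const* slice = (Method* const*)(recv_klass_base + e->method_table_offset);
          return slice[itable_index];       // selected Method*
        }
      }
    }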
 238 
 239 
 240 
 241 int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
 242   if (is_vtable_stub) {
 243     // Vtable stub size
 244     return (DebugVtables ? 210 : 16) + (CountCompiledCalls ? 6 : 0);
 245   } else {
 246     // Itable stub size
 247     return (DebugVtables ? 256 : 110) + (CountCompiledCalls ? 6 : 0);
 248   }
 249   // In order to tune these parameters, run the JVM with VM options
 250   // +PrintMiscellaneous and +WizardMode to see information about
 251   // actual itable stubs.  Look for lines like this:
 252   //   itable #1 at 0x5551212[65] left over: 3
 253   // Reduce the constants so that the "left over" number is >=3
 254   // for the common cases.
 255   // Do not aim at a left-over number of zero, because a
 256   // large vtable or itable index (> 16) will require a 32-bit
 257   // immediate displacement instead of an 8-bit one.
 258   //
 259   // The JVM98 app. _202_jess has a megamorphic interface call.
 260   // The itable code looks like this:
 261   // Decoding VtableStub itbl[1]@1
 262   //   mov    0x4(%ecx),%esi
 263   //   mov    0xe8(%esi),%edi
 264   //   lea    0x130(%esi,%edi,4),%edi
 265   //   add    $0x7,%edi
 266   //   and    $0xfffffff8,%edi
 267   //   lea    0x4(%esi),%esi
 268   //   mov    (%edi),%ebx
 269   //   cmp    %ebx,%eax
 270   //   je     success
 271   // loop:
 272   //   test   %ebx,%ebx
 273   //   je     throw_icce
 274   //   add    $0x8,%edi
 275   //   mov    (%edi),%ebx
 276   //   cmp    %ebx,%eax
 277   //   jne    loop
 278   // success:
 279   //   mov    0x4(%edi),%edi
 280   //   mov    (%esi,%edi,1),%ebx
 281   //   jmp    *0x44(%ebx)
 282   // throw_icce:
 283   //   jmp    throw_ICCE_entry
 284 }
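
Worked reading of the diagnostic this comment refers to, using the print format in the two
generators above: "itable #1 at 0x5551212[65] left over: 3" reports itable_index == 1,
entry_point == 0x5551212, a sized buffer of code_end - entry_point == 65 bytes, and
code_end - pc == 3 unused bytes at its end, i.e. the emitted stub occupied 65 - 3 = 62 bytes.
Tuning means adjusting the constants returned above so that this last number stays >= 3 for
the common cases.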
 285 
 286 int VtableStub::pd_code_alignment() {
 287   return wordSize;
 288 }
   1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  38 
  39 // machine-dependent part of VtableStubs: create VtableStub of correct size and
  40 // initialize its code
  41 
  42 #define __ masm->
  43 
  44 #ifndef PRODUCT
  45 extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
  46 #endif
  47 
  48 // These stubs are used by the compiler only.
  49 // Argument registers, which must be preserved:
  50 //   rcx - receiver (always first argument)
  51 //   rdx - second argument (if any)
  52 // Other registers that might be usable:
  53 //   rax - inline cache register (is interface for itable stub)
  54 //   rbx - method (used when calling out to interpreter)
  55 // Available now, but may become callee-save at some point:
  56 //   rsi, rdi
  57 // Note that rax and rdx are also used for return values.
  58 
  59 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  60   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  61   const int stub_code_length = VtableStub::code_size_limit(true);
  62   VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
  63   // Can be NULL if there is no free space in the code cache.
  64   if (s == NULL) {
  65     return NULL;
  66   }
  67 
  68   // Count unused bytes in instruction sequences of variable size.
  69   // We add them to the computed buffer size in order to avoid
  70   // overflow in subsequently generated stubs.
  71   address   start_pc;
  72   int       slop_bytes = 0;
  73   int       slop_delta = 0;
  74   // No variance was detected in vtable stub sizes. Setting slop32 == 0 will unveil any deviation from this observation.
  75   const int slop32     = 0;
  76 //  const int slop32     = (vtable_index == 0) ? 4 :     // code size change with transition from 8-bit to 32-bit constant (@index == 32).
  77 //                         (vtable_index < 32) ? 3 : 0;  // index == 0 generates even shorter code.
  78 
  79   ResourceMark    rm;
  80   CodeBuffer      cb(s->entry_point(), stub_code_length);
  81   MacroAssembler* masm = new MacroAssembler(&cb);
  82 
  83 #if (!defined(PRODUCT) && defined(COMPILER2))
  84   if (CountCompiledCalls) {
  85     __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
  86   }
  87 #endif
  88 
  89   // get receiver (need to skip return address on top of stack)
  90   assert(VtableStub::receiver_location() == rcx->as_VMReg(), "receiver expected in rcx");
  91 
  92   // get receiver klass
  93   address npe_addr = __ pc();
  94   __ movptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes()));
  95 
  96 #ifndef PRODUCT
  97   if (DebugVtables) {
  98     Label L;
  99     start_pc = __ pc();
 100     // check offset vs vtable length
 101     __ cmpl(Address(rax, Klass::vtable_length_offset()), vtable_index*vtableEntry::size());
 102     slop_delta  = 6 - (__ pc() - start_pc);  // cmpl varies in length, depending on data
 103     slop_bytes += slop_delta;
 104     assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
 105 
 106     __ jcc(Assembler::greater, L);
 107     __ movl(rbx, vtable_index);
 108     // VTABLE TODO: find upper bound for call_VM length.
 109     start_pc = __ pc();
 110     __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), rcx, rbx);
 111     slop_delta  = 470 - (__ pc() - start_pc);  // call_VM varies in length, depending on data
 112     slop_bytes += slop_delta;
 113     assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
 114     __ bind(L);
 115   }
 116 #endif // PRODUCT
 117 
 118   const Register method = rbx;
 119 
 120   // load Method* and target address
 121   start_pc = __ pc();
 122   __ lookup_virtual_method(rax, vtable_index, method);
 123   slop_delta  = 6 - (int)(__ pc() - start_pc);
 124   slop_bytes += slop_delta;
 125   assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
 126 
 127 #ifndef PRODUCT
 128   if (DebugVtables) {
 129     Label L;
 130     __ cmpptr(method, (int32_t)NULL_WORD);
 131     __ jcc(Assembler::equal, L);
 132     __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD);
 133     __ jcc(Assembler::notZero, L);
 134     __ stop("Vtable entry is NULL");
 135     __ bind(L);
 136   }
 137 #endif // PRODUCT
 138 
 139   // rax: receiver klass
 140   // method (rbx): Method*
 141   // rcx: receiver
 142   address ame_addr = __ pc();
 143   __ jmp( Address(method, Method::from_compiled_offset()));
 144 
 145   masm->flush();
 146   slop_bytes += slop32; // add'l slop for size variance due to large itable offsets
 147   bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, slop32);
 148 
 149   return s;
 150 }
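
This generator (and create_itable_stub below) now ends in a call to the shared
VtableStubs::bookkeeping() helper instead of doing its own epilogue. Its implementation is
not part of this file; judging from the per-stub code it replaces in the old version (the
PrintMiscellaneous trace, the overflow guarantee, and set_exception_points), a plausible
shape is sketched below. The signature and body are inferred for illustration only and may
differ from the shared code.

    // Inferred sketch -- not the actual shared implementation.
    void VtableStubs::bookkeeping(MacroAssembler* masm, outputStream* out, VtableStub* s,
                                  address npe_addr, address ame_addr, bool is_vtable_stub,
                                  int index, int slop_bytes, int index_dependent_slop) {
      if (PrintMiscellaneous && (WizardMode || Verbose)) {
        out->print_cr("%s #%d at " PTR_FORMAT "[%d] left over: %d",
                      is_vtable_stub ? "vtable" : "itable", index,
                      p2i(s->entry_point()),
                      (int)(s->code_end() - s->entry_point()),
                      (int)(s->code_end() - masm->pc()));
      }
      guarantee(masm->pc() <= s->code_end(), "overflowed buffer");
      // slop_bytes / index_dependent_slop presumably feed sizing statistics not reconstructed here.
      s->set_exception_points(npe_addr, ame_addr);
    }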
 151 
 152 
 153 VtableStub* VtableStubs::create_itable_stub(int itable_index) {
 154   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
 155   const int stub_code_length = VtableStub::code_size_limit(false);
 156   VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
 157   // Can be NULL if there is no free space in the code cache.
 158   if (s == NULL) {
 159     return NULL;
 160   }
 161   // Count unused bytes in instruction sequences of variable size.
 162   // We add them to the computed buffer size in order to avoid
 163   // overflow in subsequently generated stubs.
 164   address   start_pc;
 165   int       slop_bytes = 0;
 166   int       slop_delta = 0;
 167   const int slop32     = (itable_index == 0) ? 4 :     // code size change with transition from 8-bit to 32-bit constant (@index == 32).
 168                          (itable_index < 32) ? 3 : 0;  // index == 0 generates even shorter code.
 169 
 170   ResourceMark    rm;
 171   CodeBuffer      cb(s->entry_point(), stub_code_length);
 172   MacroAssembler* masm = new MacroAssembler(&cb);
 173 
 174 #if (!defined(PRODUCT) && defined(COMPILER2))
 175   if (CountCompiledCalls) {
 176     __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
 177   }
 178 #endif
 179 
 180   // Entry arguments:
 181   //  rax: CompiledICHolder
 182   //  rcx: Receiver
 183 
 184   // Most registers are in use; we'll use rax, rbx, rsi, rdi
 185   // (If we need to make rsi, rdi callee-save, do a push/pop here.)
 186   const Register recv_klass_reg     = rsi;
 187   const Register holder_klass_reg   = rax; // declaring interface klass (DECC)
 188   const Register resolved_klass_reg = rbx; // resolved interface klass (REFC)
 189   const Register temp_reg           = rdi;
 190 
 191   const Register icholder_reg = rax;
 192   __ movptr(resolved_klass_reg, Address(icholder_reg, CompiledICHolder::holder_klass_offset()));
 193   __ movptr(holder_klass_reg,   Address(icholder_reg, CompiledICHolder::holder_metadata_offset()));
 194 
 195   Label L_no_such_interface;
 196 
 197   // get receiver klass (also an implicit null-check)
 198   assert(VtableStub::receiver_location() ==  rcx->as_VMReg(), "receiver expected in  rcx");
 199   address npe_addr = __ pc();
 200   __ load_klass(recv_klass_reg, rcx);
 201 
 202   start_pc = __ pc();
 203 
 204   // Receiver subtype check against REFC.
 205   // Destroys recv_klass_reg value.
 206   __ lookup_interface_method(// inputs: rec. class, interface
 207                              recv_klass_reg, resolved_klass_reg, noreg,
 208                              // outputs:  scan temp. reg1, scan temp. reg2
 209                              recv_klass_reg, temp_reg,
 210                              L_no_such_interface,
 211                              /*return_method=*/false);
 212 
 213   const ptrdiff_t  typecheckSize = __ pc() - start_pc;
 214   start_pc = __ pc();
 215 
 216   // Get selected method from declaring class and itable index
 217   const Register method = rbx;
 218   __ load_klass(recv_klass_reg, rcx); // restore recv_klass_reg
 219   __ lookup_interface_method(// inputs: rec. class, interface, itable index
 220                              recv_klass_reg, holder_klass_reg, itable_index,
 221                              // outputs: method, scan temp. reg
 222                              method, temp_reg,
 223                              L_no_such_interface);
 224 
 225   const ptrdiff_t  lookupSize = __ pc() - start_pc;
 226 
 227   // We expect we need slop32 extra bytes. Reason:
 228   // The emitted code in lookup_interface_method changes when itable_index exceeds 31.
 229   // For Windows, a narrow estimate was found to be 104. Other OSes not tested.
 230   const ptrdiff_t estimate = 104;
 231   const ptrdiff_t codesize = typecheckSize + lookupSize + slop32;
 232   slop_delta  = (int)(estimate - codesize);
 233   slop_bytes += slop_delta;
 234   assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);
 235 
 236   // method (rbx): Method*
 237   // rcx: receiver
 238 
 239 #ifdef ASSERT
 240   if (DebugVtables) {
 241     Label L1;
 242     __ cmpptr(method, (int32_t)NULL_WORD);
 243     __ jcc(Assembler::equal, L1);
 244     __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD);
 245     __ jcc(Assembler::notZero, L1);
 246     __ stop("Method* is null");
 247     __ bind(L1);
 248   }
 249 #endif // ASSERT
 250 
 251   address ame_addr = __ pc();
 252   __ jmp(Address(method, Method::from_compiled_offset()));
 253 
 254   __ bind(L_no_such_interface);
 255   // Handle IncompatibleClassChangeError in itable stubs.
 256   // More detailed error message.
 257   // We force resolving of the call site by jumping to the "handle
 258   // wrong method" stub, and so let the interpreter runtime do all the
 259   // dirty work.
 260   __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 261 
 262   masm->flush();
 263   slop_bytes += slop32; // add'l slop for size variance due to large itable offsets
 264   bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, slop32);
 265 
 266   return s;
 267 }
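
The slop32 choice near the top of this generator (4 for index 0, 3 below 32, 0 from 32 on)
follows from IA-32 operand encoding, assuming the itable index is scaled by wordSize
(4 bytes on this port) into a displacement/immediate:

    index == 0        no displacement needed          shortest form, 4 bytes of headroom reserved
    1 <= index < 32   index*4 <= 124, fits disp8      3 bytes of headroom reserved
    index >= 32       index*4 >= 128, needs disp32    already worst case, no extra headroom

which matches the transition "@index == 32" noted where slop32 is computed.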
 268 
 269 int VtableStub::pd_code_alignment() {
 270   // x86 cache line size is 64 bytes, but we want to limit alignment loss.
 271   const unsigned int icache_line_size = wordSize;
 272   return icache_line_size;
 273 }