/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_sparc.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_sparc.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// machine-dependent part of VtableStubs: create vtableStub of correct size and
// initialize its code

#define __ masm->


#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oopDesc* receiver, int index);
#endif


// Create a vtable dispatch stub for the given vtable index and generate its
// code into a freshly allocated VtableStub.  Returns NULL when the code cache
// has no room for the stub.
//
// Used by compiler only; may use only caller saved, non-argument registers.
// NOTE: %%%% if any change is made to this stub make sure that the function
//       pd_code_size_limit is changed to ensure the correct size for VtableStub
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  // Size the allocation by the platform upper bound for a vtable stub.
  const int sparc_code_length = VtableStub::pd_code_size_limit(true);
  VtableStub* s = new(sparc_code_length) VtableStub(true, vtable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  ResourceMark rm;
  // Assemble directly into the stub's entry point.
  CodeBuffer cb(s->entry_point(), sparc_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#ifndef PRODUCT
  if (CountCompiledCalls) {
    __ inc_counter(SharedRuntime::nof_megamorphic_calls_addr(), G5, G3_scratch);
  }
#endif /* PRODUCT */

  assert(VtableStub::receiver_location() == O0->as_VMReg(), "receiver expected in O0");

  // get receiver klass
  // The load through the receiver is the implicit null check; its pc is
  // recorded below via set_exception_points() as the NPE point.
  address npe_addr = __ pc();
  __ load_klass(O0, G3_scratch);

  // set Method* (in case of interpreted method), and destination address
#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // check offset vs vtable length
    __ ld(G3_scratch, in_bytes(Klass::vtable_length_offset()), G5);
    __ cmp_and_br_short(G5, vtable_index*vtableEntry::size(), Assembler::greaterUnsigned, Assembler::pt, L);
    // Out-of-range index: report it through the runtime helper above.
    __ set(vtable_index, O2);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), O0, O2);
    __ bind(L);
  }
#endif

  // Load the Method* for vtable_index from the receiver's vtable into G5_method.
  __ lookup_virtual_method(G3_scratch, vtable_index, G5_method);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    __ br_notnull_short(G5_method, Assembler::pt, L);
    __ stop("Vtable entry is ZERO");
    __ bind(L);
  }
#endif

  address ame_addr = __ pc();  // if the vtable entry is null, the method is abstract
                               // NOTE: for vtable dispatches, the vtable entry will never be null.

  __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3_scratch);

  // jump to target (either compiled code or c2iadapter)
  __ JMP(G3_scratch, 0);
  // load Method* (in case we call c2iadapter)
  // G5_method already holds the Method*; the branch delay slot is a nop.
  __ delayed()->nop();

  masm->flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("vtable #%d at " PTR_FORMAT "[%d] left over: %d",
                  vtable_index, p2i(s->entry_point()),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // shut the door on sizing bugs
  int slop = 2*BytesPerInstWord;  // 32-bit offset is this much larger than a 13-bit one
  assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for sethi;add");

  // Register where an implicit NullPointerException / AbstractMethodError
  // may be raised inside this stub.
  s->set_exception_points(npe_addr, ame_addr);
  return s;
}


// Create an itable (interface) dispatch stub for the given itable index.
// Returns NULL when the code cache has no room for the stub.
//
// NOTE: %%%% if any change is made to this stub make sure that the function
//       pd_code_size_limit is changed to ensure the correct size for VtableStub
VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  const int sparc_code_length = VtableStub::pd_code_size_limit(false);
  VtableStub* s = new(sparc_code_length) VtableStub(false, itable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  ResourceMark rm;
  // Assemble directly into the stub's entry point.
  CodeBuffer cb(s->entry_point(), sparc_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

  Register G3_Klass = G3_scratch;
  Register G5_icholder = G5;  // Passed in as an argument
  Register G4_interface = G4_scratch;
  // NOTE(review): 'search' appears unused in this version of the stub —
  // candidate for removal; verify against history before deleting.
  Label search;

  // Entry arguments:
  //  G5_interface: Interface
  //  O0:           Receiver
  assert(VtableStub::receiver_location() == O0->as_VMReg(), "receiver expected in O0");

  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();
  __ load_klass(O0, G3_Klass);

  // Push a new window to get some temp registers.  This chops the head of all
  // my 64-bit %o registers in the LION build, but this is OK because no longs
  // are passed in the %o registers.  Instead, longs are passed in G1 and G4
  // and so those registers are not available here.
  __ save(SP,-frame::register_save_words*wordSize,SP);

#ifndef PRODUCT
  if (CountCompiledCalls) {
    __ inc_counter(SharedRuntime::nof_megamorphic_calls_addr(), L0, L1);
  }
#endif /* PRODUCT */

  Label L_no_such_interface;

  Register L5_method = L5;

  // Receiver subtype check against REFC.
  // First scan verifies the receiver implements the reference class'
  // interface (return_method=false: no Method* is produced here).
  __ ld_ptr(G5_icholder, CompiledICHolder::holder_klass_offset(), G4_interface);
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             G3_Klass, G4_interface, itable_index,
                             // outputs: scan temp. reg1, scan temp. reg2
                             L5_method, L2, L3,
                             L_no_such_interface,
                             /*return_method=*/ false);

  // Get Method* and entrypoint for compiler
  // Second scan resolves itable_index against the holder interface and
  // leaves the Method* in L5_method.
  __ ld_ptr(G5_icholder, CompiledICHolder::holder_metadata_offset(), G4_interface);
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             G3_Klass, G4_interface, itable_index,
                             // outputs: method, scan temp. reg
                             L5_method, L2, L3,
                             L_no_such_interface);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L01;
    __ br_notnull_short(L5_method, Assembler::pt, L01);
    __ stop("Method* is null");
    __ bind(L01);
  }
#endif

  // If the following load is through a NULL pointer, we'll take an OS
  // exception that should translate into an AbstractMethodError.  We need the
  // window count to be correct at that time.
  // restore pops the register window and moves L5_method into G5_method.
  __ restore(L5_method, 0, G5_method);
  // Restore registers *before* the AME point.

  address ame_addr = __ pc();  // if the vtable entry is null, the method is abstract
  __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3_scratch);

  // G5_method:  Method*
  // O0:         Receiver
  // G3_scratch: entry point
  __ JMP(G3_scratch, 0);
  __ delayed()->nop();

  __ bind(L_no_such_interface);
  // Handle IncompatibleClassChangeError in itable stubs.
  // More detailed error message.
  // We force resolving of the call site by jumping to the "handle
  // wrong method" stub, and so let the interpreter runtime do all the
  // dirty work.
  AddressLiteral icce(SharedRuntime::get_handle_wrong_method_stub());
  __ jump_to(icce, G3_scratch);
  // Pop the window pushed above in the branch delay slot.
  __ delayed()->restore();

  masm->flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("itable #%d at " PTR_FORMAT "[%d] left over: %d",
                  itable_index, p2i(s->entry_point()),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // shut the door on sizing bugs
  int slop = 2*BytesPerInstWord;  // 32-bit offset is this much larger than a 13-bit one
  assert(itable_index > 10 || __ pc() + slop <= s->code_end(), "room for sethi;add");

  // Register where an implicit NullPointerException / AbstractMethodError
  // may be raised inside this stub.
  s->set_exception_points(npe_addr, ame_addr);
  return s;
}


// Upper bound, in bytes, on the code generated by create_vtable_stub
// (is_vtable_stub == true) or create_itable_stub (false); the stub buffers
// above are allocated with exactly this size.
int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
  // Debug/instrumentation options emit extra code, so use a generous limit.
  if (DebugVtables || CountCompiledCalls || VerifyOops) return 1000;
  else {
    const int slop = 2*BytesPerInstWord;  // sethi;add  (needed for long offsets)
    if (is_vtable_stub) {
      // ld;ld;ld,jmp,nop
      const int basic = 5*BytesPerInstWord +
                        // shift;add for load_klass (only shift with zero heap based)
                        (UseCompressedClassPointers ?
                          MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
      return basic + slop;
    } else {
      const int basic = 54 * BytesPerInstWord +
                        // shift;add for load_klass (only shift with zero heap based)
                        (UseCompressedClassPointers ?
                          MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
      return (basic + slop);
    }
  }

  // In order to tune these parameters, run the JVM with VM options
  // +PrintMiscellaneous and +WizardMode to see information about
  // actual itable stubs.  Look for lines like this:
  //   itable #1 at 0x5551212[116] left over: 8
  // Reduce the constants so that the "left over" number is 8
  // Do not aim at a left-over number of zero, because a very
  // large vtable or itable offset (> 4K) will require an extra
  // sethi/or pair of instructions.
  //
  // The JVM98 app. _202_jess has a megamorphic interface call.
  // The itable code looks like this:
  // Decoding VtableStub itbl[1]@16
  //   ld  [ %o0 + 4 ], %g3
  //   save  %sp, -64, %sp
  //   ld  [ %g3 + 0xe8 ], %l2
  //   sll  %l2, 2, %l2
  //   add  %l2, 0x134, %l2
  //   add  %g3, %l2, %l2
  //   add  %g3, 4, %g3
  //   ld  [ %l2 ], %l5
  //   brz,pn   %l5, throw_icce
  //   cmp  %l5, %g5
  //   be  %icc, success
  //   add  %l2, 8, %l2
  // loop:
  //   ld  [ %l2 ], %l5
  //   brz,pn   %l5, throw_icce
  //   cmp  %l5, %g5
  //   bne,pn  %icc, loop
  //   add  %l2, 8, %l2
  // success:
  //   ld  [ %l2 + -4 ], %l2
  //   ld  [ %g3 + %l2 ], %l5
  //   restore  %l5, 0, %g5
  //   ld  [ %g5 + 0x44 ], %g3
  //   jmp  %g3
  //   nop
  // throw_icce:
  //   sethi  %hi(throw_ICCE_entry), %g3
  //   ! 5 more instructions here, LP64_ONLY
  //   jmp  %g3 + %lo(throw_ICCE_entry)
  //   restore
}


// Alignment (in bytes) for the entry point of a generated stub.
int VtableStub::pd_code_alignment() {
  // UltraSPARC cache line size is 8 instructions:
  const unsigned int icache_line_size = 32;
  return icache_line_size;
}