/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_ppc.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_ppc.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

#define __ masm->

#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oopDesc* receiver, int index);
#endif

// Used by compiler only; may use only caller-saved, non-argument registers.
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(true);
  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address start_pc;
  int     slop_bytes = 8; // just a two-instruction safety net
  int     slop_delta = 0;

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    start_pc = __ pc();
    int load_const_maxLen = 5*BytesPerInstWord; // load_const generates 5 instructions; assume that as the max size for load_const_optimized.
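    // With return_simm16_rest=true, load_const_optimized may leave the low
    // 16 bits of the address unmaterialized and return them in offs, which
    // is then used as the displacement in the lwz/stw accesses below.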
    int offs = __ load_const_optimized(R11_scratch1, SharedRuntime::nof_megamorphic_calls_addr(), R12_scratch2, true);
    slop_delta  = load_const_maxLen - (__ pc() - start_pc);
    slop_bytes += slop_delta;
    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
    __ lwz(R12_scratch2, offs, R11_scratch1);
    __ addi(R12_scratch2, R12_scratch2, 1);
    __ stw(R12_scratch2, offs, R11_scratch1);
  }
#endif

  assert(VtableStub::receiver_location() == R3_ARG1->as_VMReg(), "receiver expected in R3_ARG1");

  const Register rcvr_klass = R11_scratch1;
  address npe_addr = __ pc(); // npe = null pointer exception
  // Check if we must do an explicit check (implicit checks disabled, offset too large).
  __ null_check(R3, oopDesc::klass_offset_in_bytes(), /*implicit only*/NULL);
  // Get receiver klass.
  __ load_klass(rcvr_klass, R3);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // Check offset vs vtable length.
    const Register vtable_len = R12_scratch2;
    __ lwz(vtable_len, in_bytes(Klass::vtable_length_offset()), rcvr_klass);
    __ cmpwi(CCR0, vtable_len, vtable_index*vtableEntry::size());
    __ bge(CCR0, L);
    __ li(R12_scratch2, vtable_index);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), R3_ARG1, R12_scratch2, false);
    __ bind(L);
  }
#endif

  int entry_offset = in_bytes(Klass::vtable_start_offset()) +
                     vtable_index*vtableEntry::size_in_bytes();
  int v_off        = entry_offset + vtableEntry::method_offset_in_bytes();

  __ ld(R19_method, (RegisterOrConstant)v_off, rcvr_klass);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    __ cmpdi(CCR0, R19_method, 0);
    __ bne(CCR0, L);
    __ stop("Vtable entry is ZERO", 102);
    __ bind(L);
  }
#endif

  address ame_addr = __ pc(); // ame = abstract method error
  // If the vtable entry is null, the method is abstract.
  // NOTE: for vtable dispatches, the vtable entry will never be null.

  __ null_check(R19_method, in_bytes(Method::from_compiled_offset()), /*implicit only*/NULL);
  __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
  __ mtctr(R12_scratch2);
  __ bctr();

  masm->flush();
  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);

  return s;
}

VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
  const int stub_code_length = code_size_limit(false);
  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }
  // Count unused bytes in instruction sequences of variable size.
  // We add them to the computed buffer size in order to avoid
  // overflow in subsequently generated stubs.
  address start_pc;
  int     slop_bytes = 8; // just a two-instruction safety net
  int     slop_delta = 0;

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), stub_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);
  int load_const_maxLen = 5*BytesPerInstWord; // load_const generates 5 instructions; assume that as the max size for load_const_optimized.
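  // load_const_maxLen is needed twice in this stub: for the call counter
  // below and for the L_no_such_interface path at the end, hence it is
  // declared at function scope (unlike in the vtable stub above).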

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    start_pc = __ pc();
    int offs = __ load_const_optimized(R11_scratch1, SharedRuntime::nof_megamorphic_calls_addr(), R12_scratch2, true);
    slop_delta  = load_const_maxLen - (__ pc() - start_pc);
    slop_bytes += slop_delta;
    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
    __ lwz(R12_scratch2, offs, R11_scratch1);
    __ addi(R12_scratch2, R12_scratch2, 1);
    __ stw(R12_scratch2, offs, R11_scratch1);
  }
#endif

  assert(VtableStub::receiver_location() == R3_ARG1->as_VMReg(), "receiver expected in R3_ARG1");

  // Entry arguments:
  //  R19_method: CompiledICHolder (interface klass and method metadata)
  //  R3_ARG1:    Receiver

  Label L_no_such_interface;
  const Register rcvr_klass = R11_scratch1,
                 interface  = R12_scratch2,
                 tmp1       = R21_tmp1,
                 tmp2       = R22_tmp2;

  address npe_addr = __ pc(); // npe = null pointer exception
  __ null_check(R3_ARG1, oopDesc::klass_offset_in_bytes(), /*implicit only*/NULL);
  __ load_klass(rcvr_klass, R3_ARG1);

  // Receiver subtype check against REFC.
  __ ld(interface, CompiledICHolder::holder_klass_offset(), R19_method);
  __ lookup_interface_method(rcvr_klass, interface, noreg,
                             R0, tmp1, tmp2,
                             L_no_such_interface, /*return_method=*/ false);

  // Get Method* and entry point for the compiler.
  __ ld(interface, CompiledICHolder::holder_metadata_offset(), R19_method);
  __ lookup_interface_method(rcvr_klass, interface, itable_index,
                             R19_method, tmp1, tmp2,
                             L_no_such_interface, /*return_method=*/ true);

#ifndef PRODUCT
  if (DebugVtables) {
    Label ok;
    __ cmpdi(CCR0, R19_method, 0);
    __ bne(CCR0, ok);
    __ stop("method is null", 103);
    __ bind(ok);
  }
#endif

  // If the vtable entry is null, the method is abstract.
  address ame_addr = __ pc(); // ame = abstract method error

  // Must do an explicit check if implicit checks are disabled.
  __ null_check(R19_method, in_bytes(Method::from_compiled_offset()), &L_no_such_interface);
  __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
  __ mtctr(R12_scratch2);
  __ bctr();

  // Handle IncompatibleClassChangeError in itable stubs with a more detailed
  // error message: we force resolving of the call site by jumping to the
  // "handle wrong method" stub, and so let the interpreter runtime do all
  // the dirty work.
  __ bind(L_no_such_interface);
  start_pc = __ pc();
  __ load_const_optimized(R11_scratch1, SharedRuntime::get_handle_wrong_method_stub(), R12_scratch2);
  slop_delta  = load_const_maxLen - (__ pc() - start_pc);
  slop_bytes += slop_delta;
  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
  __ mtctr(R11_scratch1);
  __ bctr();

  masm->flush();
  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0);

  return s;
}

int VtableStub::pd_code_alignment() {
  // Power cache line size is 128 bytes, but we want to limit alignment loss.
  const unsigned int icache_line_size = 32;
  return icache_line_size;
}