/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_aarch64.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#endif


#define __ ce->masm()->

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  __ mov_metadata(rscratch1, m);
  ce->store_parameter(rscratch1, 1);
  ce->store_parameter(_bci, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _throw_index_out_of_bounds_exception(array == NULL), _index(index), _array(array) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ far_call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  if (_index->is_cpu_register()) {
    __ mov(rscratch1, _index->as_register());
  } else {
    __ mov(rscratch1, _index->as_jint());
  }
  __ mov(rscratch2, _array->as_pointer_register());
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(stub_id)), NULL, rscratch2);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ far_call(Address(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ mov(r3, _klass_reg->as_register());
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == r19, "length must be in r19");
  assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == r19, "length must be in r19");
  assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
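  // Pass the object and its BasicLock to the slow-path runtime entry
  // as stub parameters 1 and 0 (see the store_parameter calls below).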
  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ adr(lr, _continuation);
  __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
}


// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(false, "AArch64 should not use C1 runtime patching");
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
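    // Re-executing the bytecode in the interpreter after deoptimization will
    // raise the exception with the correct state if it is still warranted.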
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object in a scratch register because all other registers
  // must be preserved
  if (_obj->is_cpu_register()) {
    __ mov(rscratch1, _obj->as_register());
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub)), NULL, rscratch2);
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT};
  SharedRuntime::java_calling_convention(signature, args, 5, true);

  // push parameters
  // (src, src_pos, dest, destPos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // next registers will get stored on the stack
  for (int i = 0; i < 5 ; i++ ) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ str(r[i], Address(sp, st_off));
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg ");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  Address resolve(SharedRuntime::get_resolve_static_call_stub(),
                  relocInfo::static_call_type);
  address call = __ trampoline_call(resolve);
  if (call == NULL) {
    ce->bailout("trampoline stub overflow");
    return;
  }
  ce->add_call_info_here(info());

#ifndef PRODUCT
  __ lea(rscratch2, ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
  __ incrementw(Address(rscratch2));
#endif

  __ b(_continuation);
}


/////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.
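  // The slow-path stub below records the non-NULL previous value in the
  // SATB (snapshot-at-the-beginning) queue for concurrent marking.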

  __ bind(_entry);
  assert(pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = pre_val()->as_register();

  if (do_load()) {
    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
  }
  __ cbz(pre_val_reg, _continuation);
  ce->store_parameter(pre_val()->as_register(), 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
  __ b(_continuation);
}

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register new_val_reg = new_val()->as_register();
  __ cbz(new_val_reg, _continuation);
  ce->store_parameter(addr()->as_pointer_register(), 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
  __ b(_continuation);
}

#endif // INCLUDE_ALL_GCS
/////////////////////////////////////////////////////////////////////////////

#undef __