1 /* 2 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "c1/c1_CodeStubs.hpp" 27 #include "c1/c1_FrameMap.hpp" 28 #include "c1/c1_LIRAssembler.hpp" 29 #include "c1/c1_MacroAssembler.hpp" 30 #include "c1/c1_Runtime1.hpp" 31 #include "nativeInst_x86.hpp" 32 #include "runtime/sharedRuntime.hpp" 33 #include "utilities/align.hpp" 34 #include "utilities/macros.hpp" 35 #include "vmreg_x86.inline.hpp" 36 #if INCLUDE_ALL_GCS 37 #include "gc/g1/g1BarrierSet.hpp" 38 #endif // INCLUDE_ALL_GCS 39 40 41 #define __ ce->masm()-> 42 43 float ConversionStub::float_zero = 0.0; 44 double ConversionStub::double_zero = 0.0; 45 46 void ConversionStub::emit_code(LIR_Assembler* ce) { 47 __ bind(_entry); 48 assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub"); 49 50 51 if (input()->is_single_xmm()) { 52 __ comiss(input()->as_xmm_float_reg(), 53 ExternalAddress((address)&float_zero)); 54 } else if (input()->is_double_xmm()) { 55 __ comisd(input()->as_xmm_double_reg(), 56 ExternalAddress((address)&double_zero)); 57 } else { 58 LP64_ONLY(ShouldNotReachHere()); 59 __ push(rax); 60 __ ftst(); 61 __ fnstsw_ax(); 62 __ sahf(); 63 __ pop(rax); 64 } 65 66 Label NaN, do_return; 67 __ jccb(Assembler::parity, NaN); 68 __ jccb(Assembler::below, do_return); 69 70 // input is > 0 -> return maxInt 71 // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff 72 __ decrement(result()->as_register()); 73 __ jmpb(do_return); 74 75 // input is NaN -> return 0 76 __ bind(NaN); 77 __ xorptr(result()->as_register(), result()->as_register()); 78 79 __ bind(do_return); 80 __ jmp(_continuation); 81 } 82 83 void CounterOverflowStub::emit_code(LIR_Assembler* ce) { 84 __ bind(_entry); 85 Metadata *m = _method->as_constant_ptr()->as_metadata(); 86 ce->store_parameter(m, 1); 87 ce->store_parameter(_bci, 0); 88 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id))); 89 ce->add_call_info_here(_info); 90 ce->verify_oop_map(_info); 91 __ jmp(_continuation); 92 } 93 94 RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array) 95 : _throw_index_out_of_bounds_exception(false), _index(index), _array(array) { 96 assert(info != NULL, "must have info"); 97 _info = new CodeEmitInfo(info); 98 } 99 100 RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index) 101 : _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) { 102 assert(info != NULL, "must have info"); 103 _info = new CodeEmitInfo(info); 104 } 105 106 void RangeCheckStub::emit_code(LIR_Assembler* ce) { 107 __ bind(_entry); 108 if (_info->deoptimize_on_exception()) { 109 address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id); 110 __ call(RuntimeAddress(a)); 111 ce->add_call_info_here(_info); 112 ce->verify_oop_map(_info); 113 debug_only(__ should_not_reach_here()); 114 return; 115 } 116 117 // pass the array index on stack because all registers must be preserved 118 if (_index->is_cpu_register()) { 119 ce->store_parameter(_index->as_register(), 0); 120 } else { 121 ce->store_parameter(_index->as_jint(), 0); 122 } 123 Runtime1::StubID stub_id; 124 if (_throw_index_out_of_bounds_exception) { 125 stub_id = Runtime1::throw_index_exception_id; 126 } else { 127 stub_id = Runtime1::throw_range_check_failed_id; 128 ce->store_parameter(_array->as_pointer_register(), 1); 129 } 130 __ call(RuntimeAddress(Runtime1::entry_for(stub_id))); 131 ce->add_call_info_here(_info); 132 ce->verify_oop_map(_info); 133 debug_only(__ should_not_reach_here()); 134 } 135 136 PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) { 137 _info = new CodeEmitInfo(info); 138 } 139 140 void PredicateFailedStub::emit_code(LIR_Assembler* ce) { 141 __ bind(_entry); 142 address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id); 143 __ call(RuntimeAddress(a)); 144 ce->add_call_info_here(_info); 145 ce->verify_oop_map(_info); 146 debug_only(__ should_not_reach_here()); 147 } 148 149 void DivByZeroStub::emit_code(LIR_Assembler* ce) { 150 if (_offset != -1) { 151 ce->compilation()->implicit_exception_table()->append(_offset, __ offset()); 152 } 153 __ bind(_entry); 154 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id))); 155 ce->add_call_info_here(_info); 156 debug_only(__ should_not_reach_here()); 157 } 158 159 160 // Implementation of NewInstanceStub 161 162 NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) { 163 _result = result; 164 _klass = klass; 165 _klass_reg = klass_reg; 166 _info = new CodeEmitInfo(info); 167 assert(stub_id == Runtime1::new_instance_id || 168 stub_id == Runtime1::fast_new_instance_id || 169 stub_id == Runtime1::fast_new_instance_init_check_id, 170 "need new_instance id"); 171 _stub_id = stub_id; 172 } 173 174 175 void NewInstanceStub::emit_code(LIR_Assembler* ce) { 176 assert(__ rsp_offset() == 0, "frame size should be fixed"); 177 __ bind(_entry); 178 __ movptr(rdx, _klass_reg->as_register()); 179 __ call(RuntimeAddress(Runtime1::entry_for(_stub_id))); 180 ce->add_call_info_here(_info); 181 ce->verify_oop_map(_info); 182 assert(_result->as_register() == rax, "result must in rax,"); 183 __ jmp(_continuation); 184 } 185 186 187 // Implementation of NewTypeArrayStub 188 189 NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) { 190 _klass_reg = klass_reg; 191 _length = length; 192 _result = result; 193 _info = new CodeEmitInfo(info); 194 } 195 196 197 void NewTypeArrayStub::emit_code(LIR_Assembler* ce) { 198 assert(__ rsp_offset() == 0, "frame size should be fixed"); 199 __ bind(_entry); 200 assert(_length->as_register() == rbx, "length must in rbx,"); 201 assert(_klass_reg->as_register() == rdx, "klass_reg must in rdx"); 202 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id))); 203 ce->add_call_info_here(_info); 204 ce->verify_oop_map(_info); 205 assert(_result->as_register() == rax, "result must in rax,"); 206 __ jmp(_continuation); 207 } 208 209 210 // Implementation of NewObjectArrayStub 211 212 NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) { 213 _klass_reg = klass_reg; 214 _result = result; 215 _length = length; 216 _info = new CodeEmitInfo(info); 217 } 218 219 220 void NewObjectArrayStub::emit_code(LIR_Assembler* ce) { 221 assert(__ rsp_offset() == 0, "frame size should be fixed"); 222 __ bind(_entry); 223 assert(_length->as_register() == rbx, "length must in rbx,"); 224 assert(_klass_reg->as_register() == rdx, "klass_reg must in rdx"); 225 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id))); 226 ce->add_call_info_here(_info); 227 ce->verify_oop_map(_info); 228 assert(_result->as_register() == rax, "result must in rax,"); 229 __ jmp(_continuation); 230 } 231 232 233 // Implementation of MonitorAccessStubs 234 235 MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info) 236 : MonitorAccessStub(obj_reg, lock_reg) 237 { 238 _info = new CodeEmitInfo(info); 239 } 240 241 242 void MonitorEnterStub::emit_code(LIR_Assembler* ce) { 243 assert(__ rsp_offset() == 0, "frame size should be fixed"); 244 __ bind(_entry); 245 ce->store_parameter(_obj_reg->as_register(), 1); 246 ce->store_parameter(_lock_reg->as_register(), 0); 247 Runtime1::StubID enter_id; 248 if (ce->compilation()->has_fpu_code()) { 249 enter_id = Runtime1::monitorenter_id; 250 } else { 251 enter_id = Runtime1::monitorenter_nofpu_id; 252 } 253 __ call(RuntimeAddress(Runtime1::entry_for(enter_id))); 254 ce->add_call_info_here(_info); 255 ce->verify_oop_map(_info); 256 __ jmp(_continuation); 257 } 258 259 260 void MonitorExitStub::emit_code(LIR_Assembler* ce) { 261 __ bind(_entry); 262 if (_compute_lock) { 263 // lock_reg was destroyed by fast unlocking attempt => recompute it 264 ce->monitor_address(_monitor_ix, _lock_reg); 265 } 266 ce->store_parameter(_lock_reg->as_register(), 0); 267 // note: non-blocking leaf routine => no call info needed 268 Runtime1::StubID exit_id; 269 if (ce->compilation()->has_fpu_code()) { 270 exit_id = Runtime1::monitorexit_id; 271 } else { 272 exit_id = Runtime1::monitorexit_nofpu_id; 273 } 274 __ call(RuntimeAddress(Runtime1::entry_for(exit_id))); 275 __ jmp(_continuation); 276 } 277 278 279 // Implementation of patching: 280 // - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes) 281 // - Replace original code with a call to the stub 282 // At Runtime: 283 // - call to stub, jump to runtime 284 // - in runtime: preserve all registers (rspecially objects, i.e., source and destination object) 285 // - in runtime: after initializing class, restore original code, reexecute instruction 286 287 int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size; 288 289 void PatchingStub::align_patch_site(MacroAssembler* masm) { 290 // We're patching a 5-7 byte instruction on intel and we need to 291 // make sure that we don't see a piece of the instruction. It 292 // appears mostly impossible on Intel to simply invalidate other 293 // processors caches and since they may do aggressive prefetch it's 294 // very hard to make a guess about what code might be in the icache. 295 // Force the instruction to be double word aligned so that it 296 // doesn't span a cache line. 297 masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize)); 298 } 299 300 void PatchingStub::emit_code(LIR_Assembler* ce) { 301 assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call"); 302 303 Label call_patch; 304 305 // static field accesses have special semantics while the class 306 // initializer is being run so we emit a test which can be used to 307 // check that this code is being executed by the initializing 308 // thread. 309 address being_initialized_entry = __ pc(); 310 if (CommentedAssembly) { 311 __ block_comment(" patch template"); 312 } 313 if (_id == load_klass_id) { 314 // produce a copy of the load klass instruction for use by the being initialized case 315 #ifdef ASSERT 316 address start = __ pc(); 317 #endif 318 Metadata* o = NULL; 319 __ mov_metadata(_obj, o); 320 #ifdef ASSERT 321 for (int i = 0; i < _bytes_to_copy; i++) { 322 address ptr = (address)(_pc_start + i); 323 int a_byte = (*ptr) & 0xFF; 324 assert(a_byte == *start++, "should be the same code"); 325 } 326 #endif 327 } else if (_id == load_mirror_id) { 328 // produce a copy of the load mirror instruction for use by the being 329 // initialized case 330 #ifdef ASSERT 331 address start = __ pc(); 332 #endif 333 jobject o = NULL; 334 __ movoop(_obj, o); 335 #ifdef ASSERT 336 for (int i = 0; i < _bytes_to_copy; i++) { 337 address ptr = (address)(_pc_start + i); 338 int a_byte = (*ptr) & 0xFF; 339 assert(a_byte == *start++, "should be the same code"); 340 } 341 #endif 342 } else { 343 // make a copy the code which is going to be patched. 344 for (int i = 0; i < _bytes_to_copy; i++) { 345 address ptr = (address)(_pc_start + i); 346 int a_byte = (*ptr) & 0xFF; 347 __ emit_int8(a_byte); 348 *ptr = 0x90; // make the site look like a nop 349 } 350 } 351 352 address end_of_patch = __ pc(); 353 int bytes_to_skip = 0; 354 if (_id == load_mirror_id) { 355 int offset = __ offset(); 356 if (CommentedAssembly) { 357 __ block_comment(" being_initialized check"); 358 } 359 assert(_obj != noreg, "must be a valid register"); 360 Register tmp = rax; 361 Register tmp2 = rbx; 362 __ push(tmp); 363 __ push(tmp2); 364 // Load without verification to keep code size small. We need it because 365 // begin_initialized_entry_offset has to fit in a byte. Also, we know it's not null. 366 __ movptr(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes())); 367 __ get_thread(tmp); 368 __ cmpptr(tmp, Address(tmp2, InstanceKlass::init_thread_offset())); 369 __ pop(tmp2); 370 __ pop(tmp); 371 __ jcc(Assembler::notEqual, call_patch); 372 373 // access_field patches may execute the patched code before it's 374 // copied back into place so we need to jump back into the main 375 // code of the nmethod to continue execution. 376 __ jmp(_patch_site_continuation); 377 378 // make sure this extra code gets skipped 379 bytes_to_skip += __ offset() - offset; 380 } 381 if (CommentedAssembly) { 382 __ block_comment("patch data encoded as movl"); 383 } 384 // Now emit the patch record telling the runtime how to find the 385 // pieces of the patch. We only need 3 bytes but for readability of 386 // the disassembly we make the data look like a movl reg, imm32, 387 // which requires 5 bytes 388 int sizeof_patch_record = 5; 389 bytes_to_skip += sizeof_patch_record; 390 391 // emit the offsets needed to find the code to patch 392 int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record; 393 394 __ emit_int8((unsigned char)0xB8); 395 __ emit_int8(0); 396 __ emit_int8(being_initialized_entry_offset); 397 __ emit_int8(bytes_to_skip); 398 __ emit_int8(_bytes_to_copy); 399 address patch_info_pc = __ pc(); 400 assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info"); 401 402 address entry = __ pc(); 403 NativeGeneralJump::insert_unconditional((address)_pc_start, entry); 404 address target = NULL; 405 relocInfo::relocType reloc_type = relocInfo::none; 406 switch (_id) { 407 case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break; 408 case load_klass_id: target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break; 409 case load_mirror_id: target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break; 410 case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break; 411 default: ShouldNotReachHere(); 412 } 413 __ bind(call_patch); 414 415 if (CommentedAssembly) { 416 __ block_comment("patch entry point"); 417 } 418 __ call(RuntimeAddress(target)); 419 assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change"); 420 ce->add_call_info_here(_info); 421 int jmp_off = __ offset(); 422 __ jmp(_patch_site_entry); 423 // Add enough nops so deoptimization can overwrite the jmp above with a call 424 // and not destroy the world. We cannot use fat nops here, since the concurrent 425 // code rewrite may transiently create the illegal instruction sequence. 426 for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) { 427 __ nop(); 428 } 429 if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) { 430 CodeSection* cs = __ code_section(); 431 RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1)); 432 relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none); 433 } 434 } 435 436 437 void DeoptimizeStub::emit_code(LIR_Assembler* ce) { 438 __ bind(_entry); 439 ce->store_parameter(_trap_request, 0); 440 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id))); 441 ce->add_call_info_here(_info); 442 DEBUG_ONLY(__ should_not_reach_here()); 443 } 444 445 446 void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) { 447 address a; 448 if (_info->deoptimize_on_exception()) { 449 // Deoptimize, do not throw the exception, because it is probably wrong to do it here. 450 a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id); 451 } else { 452 a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id); 453 } 454 455 ce->compilation()->implicit_exception_table()->append(_offset, __ offset()); 456 __ bind(_entry); 457 __ call(RuntimeAddress(a)); 458 ce->add_call_info_here(_info); 459 ce->verify_oop_map(_info); 460 debug_only(__ should_not_reach_here()); 461 } 462 463 464 void SimpleExceptionStub::emit_code(LIR_Assembler* ce) { 465 assert(__ rsp_offset() == 0, "frame size should be fixed"); 466 467 __ bind(_entry); 468 // pass the object on stack because all registers must be preserved 469 if (_obj->is_cpu_register()) { 470 ce->store_parameter(_obj->as_register(), 0); 471 } 472 __ call(RuntimeAddress(Runtime1::entry_for(_stub))); 473 ce->add_call_info_here(_info); 474 debug_only(__ should_not_reach_here()); 475 } 476 477 478 void ArrayCopyStub::emit_code(LIR_Assembler* ce) { 479 //---------------slow case: call to native----------------- 480 __ bind(_entry); 481 // Figure out where the args should go 482 // This should really convert the IntrinsicID to the Method* and signature 483 // but I don't know how to do that. 484 // 485 VMRegPair args[5]; 486 BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT}; 487 SharedRuntime::java_calling_convention(signature, args, 5, true); 488 489 // push parameters 490 // (src, src_pos, dest, destPos, length) 491 Register r[5]; 492 r[0] = src()->as_register(); 493 r[1] = src_pos()->as_register(); 494 r[2] = dst()->as_register(); 495 r[3] = dst_pos()->as_register(); 496 r[4] = length()->as_register(); 497 498 // next registers will get stored on the stack 499 for (int i = 0; i < 5 ; i++ ) { 500 VMReg r_1 = args[i].first(); 501 if (r_1->is_stack()) { 502 int st_off = r_1->reg2stack() * wordSize; 503 __ movptr (Address(rsp, st_off), r[i]); 504 } else { 505 assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg "); 506 } 507 } 508 509 ce->align_call(lir_static_call); 510 511 ce->emit_static_call_stub(); 512 if (ce->compilation()->bailed_out()) { 513 return; // CodeCache is full 514 } 515 AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(), 516 relocInfo::static_call_type); 517 __ call(resolve); 518 ce->add_call_info_here(info()); 519 520 #ifndef PRODUCT 521 __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt)); 522 #endif 523 524 __ jmp(_continuation); 525 } 526 527 ///////////////////////////////////////////////////////////////////////////// 528 #if INCLUDE_ALL_GCS 529 530 void G1PreBarrierStub::emit_code(LIR_Assembler* ce) { 531 // At this point we know that marking is in progress. 532 // If do_load() is true then we have to emit the 533 // load of the previous value; otherwise it has already 534 // been loaded into _pre_val. 535 536 __ bind(_entry); 537 assert(pre_val()->is_register(), "Precondition."); 538 539 Register pre_val_reg = pre_val()->as_register(); 540 541 if (do_load()) { 542 ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/); 543 } 544 545 __ cmpptr(pre_val_reg, (int32_t) NULL_WORD); 546 __ jcc(Assembler::equal, _continuation); 547 ce->store_parameter(pre_val()->as_register(), 0); 548 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id))); 549 __ jmp(_continuation); 550 551 } 552 553 void G1PostBarrierStub::emit_code(LIR_Assembler* ce) { 554 __ bind(_entry); 555 assert(addr()->is_register(), "Precondition."); 556 assert(new_val()->is_register(), "Precondition."); 557 Register new_val_reg = new_val()->as_register(); 558 __ cmpptr(new_val_reg, (int32_t) NULL_WORD); 559 __ jcc(Assembler::equal, _continuation); 560 ce->store_parameter(addr()->as_pointer_register(), 0); 561 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id))); 562 __ jmp(_continuation); 563 } 564 565 #endif // INCLUDE_ALL_GCS 566 ///////////////////////////////////////////////////////////////////////////// 567 568 #undef __