/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"


#define __ ce->masm()->

float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;

void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");

  if (input()->is_single_xmm()) {
    __ comiss(input()->as_xmm_float_reg(),
              ExternalAddress((address)&float_zero));
  } else if (input()->is_double_xmm()) {
    __ comisd(input()->as_xmm_double_reg(),
              ExternalAddress((address)&double_zero));
  } else {
    LP64_ONLY(ShouldNotReachHere());
    __ push(rax);
    __ ftst();
    __ fnstsw_ax();
    __ sahf();
    __ pop(rax);
  }

  Label NaN, do_return;
  __ jccb(Assembler::parity, NaN);
  __ jccb(Assembler::below, do_return);

  // input is > 0 -> return maxInt
  // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
  __ decrement(result()->as_register());
  __ jmpb(do_return);

  // input is NaN -> return 0
  __ bind(NaN);
  __ xorptr(result()->as_register(), result()->as_register());

  __ bind(do_return);
  __ jmp(_continuation);
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  ce->store_parameter(m, 1);
  ce->store_parameter(_bci, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}
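// Note on the two constructors above: the variant that receives the array is
// used for ordinary array bounds checks and passes the array down to the
// runtime (which lets the thrown ArrayIndexOutOfBoundsException report the
// index against the actual array length), while the index-only variant throws
// a plain IndexOutOfBoundsException.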
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  // pass the array index on stack because all registers must be preserved
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
    ce->store_parameter(_array->as_pointer_register(), 1);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ movptr(rdx, _klass_reg->as_register());
  __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}
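// The register asserts in the allocation stubs above and below (klass in rdx,
// length in rbx, result in rax) document the fixed calling convention of the
// Runtime1 allocation entries; the LIR generator already pins these operands
// to those registers, so the asserts only verify an established invariant.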

// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result,
                                       CodeEmitInfo* info, bool is_value_type) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
  _is_value_type = is_value_type;
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  if (_is_value_type) {
    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_value_array_id)));
  } else {
    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  }
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info, CodeStub* throw_imse_stub, LIR_Opr scratch_reg)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
  _throw_imse_stub = throw_imse_stub;
  _scratch_reg = scratch_reg;
  if (_throw_imse_stub != NULL) {
    assert(_scratch_reg != LIR_OprFact::illegalOpr, "must be");
  }
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  if (_throw_imse_stub != NULL) {
    // When we come here, _obj_reg has already been checked to be non-null.
    Register mark = _scratch_reg->as_register();
    __ movptr(mark, Address(_obj_reg->as_register(), oopDesc::mark_offset_in_bytes()));
    __ testl(mark, markOopDesc::always_locked_pattern);
    __ jcc(Assembler::notZero, *_throw_imse_stub->entry());
  }
  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by the fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}


// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;
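// _patch_info_offset is the distance from the return address of the call into
// the patching stub back to the patch-info record emitted in emit_code();
// on x86 a NativeGeneralJump and a NativeCall are both 5 bytes, so the record
// ends exactly one instruction before the return address (see the assert on
// patch_info_pc below).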

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on Intel and we need to
  // make sure that we don't see a piece of the instruction.  It
  // appears mostly impossible on Intel to simply invalidate other
  // processors' caches, and since they may prefetch aggressively it's
  // very hard to guess what code might be in the icache.  Force the
  // instruction to be double-word aligned so that it
  // doesn't span a cache line.
  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");

  Label call_patch;

  // static field accesses have special semantics while the class
  // initializer is being run so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    Metadata* o = NULL;
    __ mov_metadata(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id) {
    // produce a copy of the load mirror instruction for use by the being
    // initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    jobject o = NULL;
    __ movoop(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8(a_byte);
      *ptr = 0x90; // make the site look like a nop
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != noreg, "must be a valid register");
    Register tmp = rax;
    Register tmp2 = rbx;
    __ push(tmp);
    __ push(tmp2);
    // Load without verification to keep code size small. We need it because
    // being_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
    __ movptr(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
    __ get_thread(tmp);
    __ cmpptr(tmp, Address(tmp2, InstanceKlass::init_thread_offset()));
    __ pop(tmp2);
    __ pop(tmp);
    __ jcc(Assembler::notEqual, call_patch);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ jmp(_patch_site_continuation);

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }
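  // Byte layout of the patch record emitted below (it is disguised as a
  // "movl eax, imm32" so the disassembly stays readable):
  //   0xB8                            movl opcode
  //   0x00                            unused
  //   being_initialized_entry_offset  from the end of this record back to
  //                                   the patch template entry
  //   bytes_to_skip                   from the end of the copied code to the
  //                                   end of this record
  //   _bytes_to_copy                  number of original bytes to restore at
  //                                   _pc_start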
  if (CommentedAssembly) {
    __ block_comment("patch data encoded as movl");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but for readability of
  // the disassembly we make the data look like a movl reg, imm32,
  // which requires 5 bytes
  int sizeof_patch_record = 5;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

  __ emit_int8((unsigned char)0xB8);
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world. We cannot use fat nops here, since the concurrent
  // code rewrite may transiently create the illegal instruction sequence.
  for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
    __ nop();
  }
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}
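// ImplicitNullCheckStub is the out-of-line target for implicit (hardware)
// null checks: the entry appended to the implicit exception table below maps
// the pc of the faulting access (_offset) to this stub, so the VM's SIGSEGV
// handler can continue execution here instead of treating the fault as a
// crash.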
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT};
  SharedRuntime::java_calling_convention(signature, args, 5, true);

  // push parameters
  // (src, src_pos, dst, dst_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // args assigned to stack slots are stored there; the rest must already
  // be in the right registers
  for (int i = 0; i < 5 ; i++ ) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ movptr(Address(rsp, st_off), r[i]);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
                         relocInfo::static_call_type);
  __ call(resolve);
  ce->add_call_info_here(info());

#ifndef PRODUCT
  __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
#endif

  __ jmp(_continuation);
}

#undef __