/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif // INCLUDE_ALL_GCS


#define __ ce->masm()->

float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;

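// Slow path for f2i/d2i. The inline conversion (cvttss2si/cvttsd2si on this
// platform) leaves 0x80000000 (the x86 "integer indefinite" value) in the
// result register for NaN and out-of-range inputs; this stub fixes that up
// to Java semantics: NaN -> 0, positive overflow -> 0x7fffffff, negative
// overflow -> 0x80000000 (already in place).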
void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");


  if (input()->is_single_xmm()) {
    __ comiss(input()->as_xmm_float_reg(),
              ExternalAddress((address)&float_zero));
  } else if (input()->is_double_xmm()) {
    __ comisd(input()->as_xmm_double_reg(),
              ExternalAddress((address)&double_zero));
  } else {
    LP64_ONLY(ShouldNotReachHere());
    __ push(rax);
    __ ftst();
    __ fnstsw_ax();
    __ sahf();
    __ pop(rax);
  }

  Label NaN, do_return;
  __ jccb(Assembler::parity, NaN);
  __ jccb(Assembler::below, do_return);

  // input is > 0 -> return maxInt
  // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
  __ decrement(result()->as_register());
  __ jmpb(do_return);

  // input is NaN -> return 0
  __ bind(NaN);
  __ xorptr(result()->as_register(), result()->as_register());

  __ bind(do_return);
  __ jmp(_continuation);
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_method->as_register(), 1);
  ce->store_parameter(_bci, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}


void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  // pass the array index on stack because all registers must be preserved
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ movptr(rdx, _klass_reg->as_register());
  __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of MonitorAccessStubs

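// Note: both monitor stubs call into Runtime1; the *_nofpu variants are
// selected when the compiled method contains no FPU code, so the runtime
// stub does not need to save and restore the FPU/XMM state around the call.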
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}


// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on intel and we need to
  // make sure that we don't see a piece of the instruction. It
  // appears mostly impossible on Intel to simply invalidate other
  // processors' caches and since they may do aggressive prefetch it's
  // very hard to make a guess about what code might be in the icache.
  // Force the instruction to be double word aligned so that it
  // doesn't span a cache line.
  masm->align(round_to(NativeGeneralJump::instruction_size, wordSize));
}

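// Rough layout of the code emitted by PatchingStub::emit_code below:
//   being_initialized_entry:  patch template (a fresh load instruction for
//                             load_klass/load_mirror, or a copy of the
//                             original bytes for the other cases)
//   [load_mirror only]        being-initialized check; the initializing
//                             thread executes the template and jumps back
//                             to _patch_site_continuation
//   patch record:             5 bytes disguised as a movl (layout below)
//   call_patch:               call into the Runtime1 patching stub, then
//                             jmp back to the patch site, padded with nops
//                             so deoptimization can overwrite the jmp with
//                             a call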
void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");

  Label call_patch;

  // static field accesses have special semantics while the class
  // initializer is being run so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    Metadata* o = NULL;
    __ mov_metadata(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id) {
    // produce a copy of the load mirror instruction for use by the being
    // initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    jobject o = NULL;
    __ movoop(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8(a_byte);
      *ptr = 0x90; // make the site look like a nop
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != noreg, "must be a valid register");
    Register tmp = rax;
    Register tmp2 = rbx;
    __ push(tmp);
    __ push(tmp2);
    // Load without verification to keep code size small. We need it because
    // being_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
    __ movptr(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
    __ get_thread(tmp);
    __ cmpptr(tmp, Address(tmp2, InstanceKlass::init_thread_offset()));
    __ pop(tmp2);
    __ pop(tmp);
    __ jcc(Assembler::notEqual, call_patch);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ jmp(_patch_site_continuation);

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }
  if (CommentedAssembly) {
    __ block_comment("patch data encoded as movl");
  }
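  // Patch record layout, derived from the emission code below:
  //   0xB8                             opcode of "movl eax, imm32"
  //                                    (data only, never executed)
  //   0x00                             unused low byte of the fake imm32
  //   being_initialized_entry_offset   distance from the end of this record
  //                                    back to being_initialized_entry
  //   bytes_to_skip                    distance from end_of_patch to the end
  //                                    of this record
  //   _bytes_to_copy                   size of the patch template above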
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but for readability of
  // the disassembly we make the data look like a movl reg, imm32,
  // which requires 5 bytes
  int sizeof_patch_record = 5;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

  __ emit_int8((unsigned char)0xB8);
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world.
  for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
    __ nop();
  }
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

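// Implicit null checks elide the explicit test: the load/store itself
// faults on a null base. The implicit exception table entry added below
// maps the offset of the faulting instruction to this stub so the signal
// handler can continue execution here.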
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT};
  SharedRuntime::java_calling_convention(signature, args, 5, true);

  // push parameters
  // (src, src_pos, dest, destPos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // next registers will get stored on the stack
  for (int i = 0; i < 5 ; i++ ) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ movptr (Address(rsp, st_off), r[i]);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg ");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
                         relocInfo::static_call_type);
  __ call(resolve);
  ce->add_call_info_here(info());

#ifndef PRODUCT
  __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
#endif

  __ jmp(_continuation);
}

/////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

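// G1 write-barrier slow paths. The pre-barrier implements G1's
// snapshot-at-the-beginning (SATB) protocol: while marking is in progress
// the previous value of the field is logged before the store so concurrent
// marking does not lose it. The post-barrier records stores that may create
// cross-region references so the remembered sets stay up to date.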
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(_entry);
  assert(pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = pre_val()->as_register();

  if (do_load()) {
    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
  }

  __ cmpptr(pre_val_reg, (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, _continuation);
  ce->store_parameter(pre_val()->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
  __ jmp(_continuation);

}

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register new_val_reg = new_val()->as_register();
  __ cmpptr(new_val_reg, (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, _continuation);
  ce->store_parameter(addr()->as_pointer_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
  __ jmp(_continuation);
}

#endif // INCLUDE_ALL_GCS
/////////////////////////////////////////////////////////////////////////////

#undef __