/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"


// Shorthand: route assembler calls through the LIR_Assembler's MacroAssembler.
#define __ ce->masm()->

float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;

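// Slow path for _f2i/_d2i.  On x86, cvttss2si/cvttsd2si produce the
// "integer indefinite" value 0x80000000 when the input is NaN or does not
// fit in an int, so the fast path branches here whenever that value shows
// up.  The stub re-examines the input against zero to produce the result
// Java requires: 0 for NaN, max_jint for positive overflow, and min_jint
// (the value already sitting in the result register) for negative overflow.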
void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");

  if (input()->is_single_xmm()) {
    __ comiss(input()->as_xmm_float_reg(),
              ExternalAddress((address)&float_zero));
  } else if (input()->is_double_xmm()) {
    __ comisd(input()->as_xmm_double_reg(),
              ExternalAddress((address)&double_zero));
  } else {
    // x87 fallback, 32-bit only: compare ST(0) against zero and move the
    // FPU condition codes into EFLAGS via fnstsw/sahf.
    LP64_ONLY(ShouldNotReachHere());
    __ push(rax);
    __ ftst();
    __ fnstsw_ax();
    __ sahf();
    __ pop(rax);
  }

  Label NaN, do_return;
  __ jccb(Assembler::parity, NaN);       // unordered compare -> input is NaN
  __ jccb(Assembler::below, do_return);  // input < 0 -> result already holds min_jint

  // input is > 0 -> return maxInt
  // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
  __ decrement(result()->as_register());
  __ jmpb(do_return);

  // input is NaN -> return 0
  __ bind(NaN);
  __ xorptr(result()->as_register(), result()->as_register());

  __ bind(do_return);
  __ jmp(_continuation);
}

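// Called when a method's invocation or backedge counter overflows; the
// runtime uses the method and bci to decide whether to recompile the
// method at a higher optimization level.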
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  ce->store_parameter(m, 1);
  ce->store_parameter(_bci, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}


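// On a failed range check this stub either deoptimizes (when the check was
// emitted for a loop predicate, where throwing directly would be wrong) or
// calls the matching Runtime1 throw stub.  Neither path returns here.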
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  // pass the array index on the stack because all registers must be preserved
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

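// Integer division by zero traps on x86 (#DE -> SIGFPE), so when the
// divide is used as an implicit check the faulting instruction's offset is
// recorded in the implicit exception table, letting the signal handler
// redirect execution to this stub.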
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id   = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ movptr(rdx, _klass_reg->as_register());
  __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}

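// The monitorenter/monitorexit slow paths come in two flavors: the _nofpu
// variants skip saving and restoring FPU/XMM state, which is only needed
// when the compiled method actually contains FPU code.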
void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(),  1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}


// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination objects)
// - in runtime: after initializing class, restore original code, reexecute instruction

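// Offset from the end of the patch-info record to the return address of
// the call into the patching stub: the 5-byte record must sit immediately
// before the (likewise 5-byte) call, a layout the assert in emit_code()
// below verifies.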
int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on Intel and we need to make
  // sure that another processor never observes a partially written
  // instruction.  It is effectively impossible on Intel to invalidate
  // other processors' caches, and since they may prefetch aggressively
  // it is very hard to guess what code might be in the icache.  Force
  // the instruction to be double-word aligned so that it doesn't span a
  // cache line.
  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}

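// A sketch of the code laid out below (the being_initialized check is
// only emitted for load_mirror_id):
//   <template or copy of the instruction(s) being patched>
//   [being_initialized check, jump to _patch_site_continuation]
//   5-byte patch record, encoded to disassemble as "movl eax, imm32"
//   call into the Runtime1 patching stub      <- call_patch
//   jmp back to the patch site
//   nops so deoptimization can overwrite the jmp with a call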
void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");

  Label call_patch;

  // static field accesses have special semantics while the class
  // initializer is being run, so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    Metadata* o = NULL;
    __ mov_metadata(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id) {
    // produce a copy of the load mirror instruction for use by the being
    // initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    jobject o = NULL;
    __ movoop(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8(a_byte);
      *ptr = 0x90; // make the site look like a nop
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != noreg, "must be a valid register");
    Register tmp = rax;
    Register tmp2 = rbx;
    __ push(tmp);
    __ push(tmp2);
    // Load without verification to keep code size small. We need it because
    // being_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
    __ movptr(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
    __ get_thread(tmp);
    __ cmpptr(tmp, Address(tmp2, InstanceKlass::init_thread_offset()));
    __ pop(tmp2);
    __ pop(tmp);
    __ jcc(Assembler::notEqual, call_patch);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ jmp(_patch_site_continuation);

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }
  if (CommentedAssembly) {
    __ block_comment("patch data encoded as movl");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but for readability of
  // the disassembly we make the data look like a movl reg, imm32,
  // which requires 5 bytes.
  int sizeof_patch_record = 5;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

  __ emit_int8((unsigned char)0xB8);          // opcode of "movl eax, imm32"
  __ emit_int8(0);                            // padding; only 3 data bytes are needed
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);    reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);   reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world. We cannot use fat nops here, since the concurrent
  // code rewrite may transiently create the illegal instruction sequence.
  for (int j = __ offset(); j < jmp_off + 5; j++) {
    __ nop();
  }
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address)_pc_start, reloc_type, relocInfo::none);
  }
}


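// Deoptimizes the frame that called this stub.  _trap_request encodes the
// deoptimization reason and action; control transfers to the deopt
// handler and does not return here.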
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on the stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


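// Slow path for the arraycopy intrinsic: the copy is re-issued as an
// ordinary static Java call through the resolve stub, which resolves the
// callee from the call site (the System.arraycopy invocation this
// intrinsic replaced) and invokes it.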
void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go.
  // This should really convert the IntrinsicID to the Method* and signature,
  // but I don't know how to do that.
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5, true);

  // set up the parameters
  // (src, src_pos, dst, dst_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // arguments that are not passed in registers get stored on the stack
  for (int i = 0; i < 5; i++) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ movptr(Address(rsp, st_off), r[i]);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
                         relocInfo::static_call_type);
  __ call(resolve);
  ce->add_call_info_here(info());

#ifndef PRODUCT
  __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
#endif

  __ jmp(_continuation);
}

#undef __