/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"

#define __ ce->masm()->

float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;

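// On x86, cvttss2si/cvttsd2si return the "integer indefinite" value
// 0x80000000 when the input is NaN or does not fit in an int, so the fast
// path branches here with that value already in the result register.
// This slow path fixes it up: NaN -> 0, positive overflow -> 0x7fffffff
// (max_jint), negative overflow keeps 0x80000000 (min_jint).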
void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");

  if (input()->is_single_xmm()) {
    __ comiss(input()->as_xmm_float_reg(),
              ExternalAddress((address)&float_zero));
  } else if (input()->is_double_xmm()) {
    __ comisd(input()->as_xmm_double_reg(),
              ExternalAddress((address)&double_zero));
  } else {
    LP64_ONLY(ShouldNotReachHere());
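    // 32-bit x87 path: ftst compares ST(0) against zero, fnstsw_ax copies
    // the FPU status word into AX, and sahf moves AH into EFLAGS, so the
    // jcc conditions below work for both paths. rax is saved and restored
    // because fnstsw clobbers AX, which may hold the conversion result.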
    __ push(rax);
    __ ftst();
    __ fnstsw_ax();
    __ sahf();
    __ pop(rax);
  }

  Label NaN, do_return;
  __ jccb(Assembler::parity, NaN);
  __ jccb(Assembler::below, do_return);

  // input is > 0 -> return maxInt
  // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
  __ decrement(result()->as_register());
  __ jmpb(do_return);

  // input is NaN -> return 0
  __ bind(NaN);
  __ xorptr(result()->as_register(), result()->as_register());

  __ bind(do_return);
  __ jmp(_continuation);
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  ce->store_parameter(m, 1);
  ce->store_parameter(_bci, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}


void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  // pass the array index on stack because all registers must be preserved
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id   = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ movptr(rdx, _klass_reg->as_register());
  __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(),  1);
  ce->store_parameter(_lock_reg->as_register(), 0);
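  // The _nofpu stub variants skip saving and restoring FPU/XMM state,
  // which is unnecessary when the compiled method contains no FPU code.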
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}

// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction
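//
// After emit_code below, the patch site and stub look roughly like this:
//
//   patch site:  jmp <call_patch>                      (5-byte jump installed over
//                                                       the original code)
//   stub:        <copy of the original instruction(s)> (_bytes_to_copy bytes)
//                [being_initialized check]             (load_mirror only)
//                patch record encoded as movl          (5 bytes)
//   call_patch:  call <Runtime1 patching entry>
//                jmp <patch site>
//                nops                                  (so deopt can patch the jmp
//                                                       into a call)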

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on Intel and we need to
  // make sure that we don't see a piece of the instruction.  It
  // appears mostly impossible on Intel to simply invalidate other
  // processors' caches, and since they may do aggressive prefetch it's
  // very hard to make a guess about what code might be in the icache.
  // Force the instruction to be double word aligned so that it
  // doesn't span a cache line.
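  // For example, with NativeGeneralJump::instruction_size == 5 this rounds
  // up to 8, so the jump that is later patched in starts on an 8-byte
  // boundary and cannot straddle a cache line.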
  masm->align(round_to(NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");

  Label call_patch;

  // static field accesses have special semantics while the class
  // initializer is being run so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    Metadata* o = NULL;
    __ mov_metadata(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id) {
    // produce a copy of the load mirror instruction for use by the being
    // initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    jobject o = NULL;
    __ movoop(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8(a_byte);
      *ptr = 0x90; // make the site look like a nop
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != noreg, "must be a valid register");
    Register tmp = rax;
    Register tmp2 = rbx;
    __ push(tmp);
    __ push(tmp2);
    // Load without verification to keep code size small. We need it because
    // begin_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
    __ movptr(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
    __ get_thread(tmp);
    __ cmpptr(tmp, Address(tmp2, InstanceKlass::init_thread_offset()));
    __ pop(tmp2);
    __ pop(tmp);
    __ jcc(Assembler::notEqual, call_patch);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ jmp(_patch_site_continuation);

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }
  if (CommentedAssembly) {
    __ block_comment("patch data encoded as movl");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but for readability of
  // the disassembly we make the data look like a movl reg, imm32,
  // which requires 5 bytes
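  // Byte layout of the record (one byte each, matching the emits below):
  //   0xB8 (movl opcode)  0x00  being_initialized_entry_offset  bytes_to_skip  bytes_to_copy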
  int sizeof_patch_record = 5;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

  __ emit_int8((unsigned char)0xB8);
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world. We cannot use fat nops here, since the concurrent
  // code rewrite may transiently create the illegal instruction sequence.
  for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
    __ nop();
  }
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT};
  SharedRuntime::java_calling_convention(signature, args, 5, true);

  // push parameters
  // (src, src_pos, dst, dst_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // next registers will get stored on the stack
  for (int i = 0; i < 5 ; i++ ) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ movptr (Address(rsp, st_off), r[i]);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
                         relocInfo::static_call_type);
  __ call(resolve);
  ce->add_call_info_here(info());

#ifndef PRODUCT
  __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
#endif

  __ jmp(_continuation);
}

#undef __