/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_x86.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"


#define __ ce->masm()->

float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;

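// Slow path for Bytecodes::_f2i and _d2i. The fast-path conversion
// (cvttss2si/cvttsd2si in the XMM case) produces the "integer indefinite"
// value 0x80000000 when the input is NaN or does not fit in an int, so this
// stub disambiguates by comparing the input against zero:
// NaN -> 0, negative overflow -> keep min_jint, positive overflow -> max_jint.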
void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");


  if (input()->is_single_xmm()) {
    __ comiss(input()->as_xmm_float_reg(),
              ExternalAddress((address)&float_zero));
  } else if (input()->is_double_xmm()) {
    __ comisd(input()->as_xmm_double_reg(),
              ExternalAddress((address)&double_zero));
  } else {
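    // x87 fallback (32-bit without SSE): ftst compares ST(0) against zero,
    // and fnstsw_ax/sahf copy the FPU condition codes into EFLAGS so that
    // the same jcc sequence below works for both the XMM and the x87 case.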
    LP64_ONLY(ShouldNotReachHere());
    __ push(rax);
    __ ftst();
    __ fnstsw_ax();
    __ sahf();
    __ pop(rax);
  }

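  // An unordered compare (NaN input) sets the parity flag; input < 0.0 sets
  // the carry flag ("below"). The result register still holds the 0x80000000
  // written by the fast-path conversion.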
  Label NaN, do_return;
  __ jccb(Assembler::parity, NaN);
  __ jccb(Assembler::below, do_return);

  // input is > 0 -> return maxInt
  // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
  __ decrement(result()->as_register());
  __ jmpb(do_return);

  // input is NaN -> return 0
  __ bind(NaN);
  __ xorptr(result()->as_register(), result()->as_register());

  __ bind(do_return);
  __ jmp(_continuation);
}

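// Slow path taken when a method's invocation or backedge counter overflows;
// the runtime may use this to trigger profiling or tiered recompilation
// before execution continues at _continuation.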
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  ce->store_parameter(m, 1);
  ce->store_parameter(_bci, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
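  // Deoptimize instead of raising the exception; the trap stub re-enters the
  // runtime and never returns to this code (see should_not_reach_here below).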
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  // pass the array index on stack because all registers must be preserved
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
    ce->store_parameter(_array->as_pointer_register(), 1);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


// Implementation of LoadFlattenedArrayStub
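//
// Slow path for reading an element of a flattened value type array: the
// element's fields are stored inline in the array rather than behind an oop,
// so the runtime must produce a buffered instance and copy the fields into
// it. The buffered oop comes back in rax.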

LoadFlattenedArrayStub::LoadFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  _array = array;
  _index = index;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void LoadFlattenedArrayStub::visit(LIR_OpVisitState* visitor) {
  visitor->do_slow_case(_info);
  visitor->do_input(_array);
  visitor->do_input(_index);
  visitor->do_output(_result);

  // Tell the register allocator that the runtime call will scratch rax.
  visitor->do_output(FrameMap::rax_oop_opr);
}

void LoadFlattenedArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_array->as_register(), 1);
  ce->store_parameter(_index->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::load_flattened_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  if (_result->as_register() != rax) {
    __ movptr(_result->as_register(), rax);
  }
  __ jmp(_continuation);
}


// Implementation of StoreFlattenedArrayStub
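//
// Slow path for writing an element of a flattened value type array: the
// runtime copies the value's fields from the passed instance into the
// inline array element.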

StoreFlattenedArrayStub::StoreFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr value, CodeEmitInfo* info) {
  _array = array;
  _index = index;
  _value = value;
  _info = new CodeEmitInfo(info);
}


void StoreFlattenedArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_array->as_register(), 2);
  ce->store_parameter(_index->as_register(), 1);
  ce->store_parameter(_value->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::store_flattened_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id   = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
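  // The Runtime1 allocation stubs expect the klass in rdx and return the
  // newly allocated object in rax (asserted below).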
  __ movptr(rdx, _klass_reg->as_register());
  __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result,
                                       CodeEmitInfo* info, bool is_value_type) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
  _is_value_type = is_value_type;
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  if (_is_value_type) {
    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_value_array_id)));
  } else {
    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  }
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info, CodeStub* throw_imse_stub, LIR_Opr scratch_reg)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
  _throw_imse_stub = throw_imse_stub;
  _scratch_reg = scratch_reg;
  if (_throw_imse_stub != NULL) {
    assert(_scratch_reg != LIR_OprFact::illegalOpr, "must be");
  }
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  if (_throw_imse_stub != NULL) {
    // When we come here, _obj_reg has already been checked to be non-null.
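    // A mark word with always_locked_pattern set identifies a value type
    // instance; monitorenter on one must throw IllegalMonitorStateException.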
    Register mark = _scratch_reg->as_register();
    __ movptr(mark, Address(_obj_reg->as_register(), oopDesc::mark_offset_in_bytes()));
    __ testl(mark, markOopDesc::always_locked_pattern);
    __ jcc(Assembler::notZero, *_throw_imse_stub->entry());
  }
  ce->store_parameter(_obj_reg->as_register(),  1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}


// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

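// The patch info record immediately precedes the call into the patching stub,
// so the runtime finds it at a fixed negative offset from the call's return
// address; emit_code below asserts that this offset never changes.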
int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on Intel and we need to
  // make sure that we don't see a piece of the instruction.  It
  // appears mostly impossible on Intel to simply invalidate other
  // processors' caches and since they may do aggressive prefetch it's
  // very hard to make a guess about what code might be in the icache.
  // Force the instruction to be double word aligned so that it
  // doesn't span a cache line.
  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");

  Label call_patch;

  // Static field accesses have special semantics while the class
  // initializer is being run, so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    Metadata* o = NULL;
    __ mov_metadata(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id) {
    // produce a copy of the load mirror instruction for use by the being
    // initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    jobject o = NULL;
    __ movoop(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8(a_byte);
      *ptr = 0x90; // make the site look like a nop
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != noreg, "must be a valid register");
    Register tmp = rax;
    Register tmp2 = rbx;
    __ push(tmp);
    __ push(tmp2);
    // Load without verification to keep code size small. We need it because
    // begin_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
    __ movptr(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
    __ get_thread(tmp);
    __ cmpptr(tmp, Address(tmp2, InstanceKlass::init_thread_offset()));
    __ pop(tmp2);
    __ pop(tmp);
    __ jcc(Assembler::notEqual, call_patch);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ jmp(_patch_site_continuation);

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }
  if (CommentedAssembly) {
    __ block_comment("patch data encoded as movl");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but for readability of
  // the disassembly we make the data look like a movl reg, imm32,
  // which requires 5 bytes.
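  // Record layout (reads as "movl eax, imm32" in the disassembly):
  //   0xB8                              movl opcode
  //   0x00                              unused
  //   being_initialized_entry_offset    bytes back to the patch template
  //   bytes_to_skip                     bytes from end_of_patch to the end of this record
  //   _bytes_to_copy                    length of the patch template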
  int sizeof_patch_record = 5;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

  __ emit_int8((unsigned char)0xB8);
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world. We cannot use fat nops here, since the concurrent
  // code rewrite may transiently create the illegal instruction sequence.
  for (int j = __ offset(); j < jmp_off + 5; j++) {
    __ nop();
  }
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go.
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5, true);
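  // args[i] now describes where the Java calling convention places each
  // argument: register arguments are expected to be in place already (see
  // the assert below), stack arguments are stored out in the loop below.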

  // push parameters
  // (src, src_pos, dest, dest_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // next registers will get stored on the stack
  for (int i = 0; i < 5; i++) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ movptr(Address(rsp, st_off), r[i]);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
                         relocInfo::static_call_type);
  __ call(resolve);
  ce->add_call_info_here(info());

#ifndef PRODUCT
  __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
#endif

  __ jmp(_continuation);
}

#undef __