/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_x86.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"


#define __ ce->masm()->

float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;

void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");

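  // On x86, cvttss2si/cvttsd2si write 0x80000000 (the "integer indefinite"
  // value, min_jint) for NaN and for inputs outside the int range. The fast
  // path has already done the conversion; this stub only fixes up positive
  // overflow (-> max_jint) and NaN (-> 0).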
  if (input()->is_single_xmm()) {
    __ comiss(input()->as_xmm_float_reg(),
              ExternalAddress((address)&float_zero));
  } else if (input()->is_double_xmm()) {
    __ comisd(input()->as_xmm_double_reg(),
              ExternalAddress((address)&double_zero));
  } else {
    LP64_ONLY(ShouldNotReachHere());
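    // 32-bit only: x87 fallback. ftst compares ST(0) against zero;
    // fnstsw/sahf copy the FPU condition codes into EFLAGS so the
    // branches below work for both the SSE and the x87 path.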
    __ push(rax);
    __ ftst();
    __ fnstsw_ax();
    __ sahf();
    __ pop(rax);
  }

  Label NaN, do_return;
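  // After either compare: PF = 1 means unordered (the input is NaN),
  // CF = 1 means input < 0.0 (the result register already holds min_jint).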
  __ jccb(Assembler::parity, NaN);
  __ jccb(Assembler::below, do_return);

  // input is > 0 -> return maxInt
  // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
  __ decrement(result()->as_register());
  __ jmpb(do_return);

  // input is NaN -> return 0
  __ bind(NaN);
  __ xorptr(result()->as_register(), result()->as_register());

  __ bind(do_return);
  __ jmp(_continuation);
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
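  // Pass the Method* and bci to the runtime in the two outgoing stack
  // slots read by the counter_overflow runtime entry.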
  ce->store_parameter(m, 1);
  ce->store_parameter(_bci, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  // pass the array index on stack because all registers must be preserved
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
    ce->store_parameter(_array->as_pointer_register(), 1);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


// Implementation of LoadFlattenedArrayStub

LoadFlattenedArrayStub::LoadFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  _array = array;
  _index = index;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void LoadFlattenedArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_array->as_register(), 1);
  ce->store_parameter(_index->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::load_flattened_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  if (_result->as_register() != rax) {
    __ movptr(_result->as_register(), rax);
  }
  __ jmp(_continuation);
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id   = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ movptr(rdx, _klass_reg->as_register());
  __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result,
                                       CodeEmitInfo* info, bool is_value_type) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
  _is_value_type = is_value_type;
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  if (_is_value_type) {
    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_value_array_id)));
  } else {
    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  }
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info, CodeStub* throw_imse_stub, LIR_Opr scratch_reg)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
  _throw_imse_stub = throw_imse_stub;
  _scratch_reg = scratch_reg;
  if (_throw_imse_stub != NULL) {
    assert(_scratch_reg != LIR_OprFact::illegalOpr, "must be");
  }
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  if (_throw_imse_stub != NULL) {
    // When we come here, _obj_reg has already been checked to be non-null.
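    // Value types carry the always_locked pattern in their mark word;
    // monitorenter on a value type must throw IllegalMonitorStateException.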
    Register mark = _scratch_reg->as_register();
    __ movptr(mark, Address(_obj_reg->as_register(), oopDesc::mark_offset_in_bytes()));
    __ testl(mark, markOopDesc::always_locked_pattern);
    __ jcc(Assembler::notZero, *_throw_imse_stub->entry());
  }
  ce->store_parameter(_obj_reg->as_register(),  1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}


// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;
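// The patch-info record ends where the call into the patching stub begins,
// so the runtime can locate it one jump-sized instruction before the
// return address.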

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on Intel and we need to
  // make sure that we don't see a piece of the instruction.  It
  // appears mostly impossible on Intel to simply invalidate other
  // processors' caches, and since they may do aggressive prefetching
  // it's very hard to guess what code might be in the icache.
  // Force the instruction to be double word aligned so that it
  // doesn't span a cache line.
  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");

  Label call_patch;

  // static field accesses have special semantics while the class
  // initializer is being run so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    Metadata* o = NULL;
    __ mov_metadata(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id) {
    // produce a copy of the load mirror instruction for use by the being
    // initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    jobject o = NULL;
    __ movoop(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8(a_byte);
      *ptr = 0x90; // make the site look like a nop
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != noreg, "must be a valid register");
    Register tmp = rax;
    Register tmp2 = rbx;
    __ push(tmp);
    __ push(tmp2);
    // Load without verification to keep code size small. We need it because
    // being_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
    __ movptr(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
    __ get_thread(tmp);
    __ cmpptr(tmp, Address(tmp2, InstanceKlass::init_thread_offset()));
    __ pop(tmp2);
    __ pop(tmp);
    __ jcc(Assembler::notEqual, call_patch);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ jmp(_patch_site_continuation);

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }
  if (CommentedAssembly) {
    __ block_comment("patch data encoded as movl");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but for readability of
  // the disassembly we make the data look like a movl reg, imm32,
  // which requires 5 bytes
  int sizeof_patch_record = 5;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

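  // Patch record layout (reads as "movl eax, imm32" in a disassembly):
  //   0xB8                            movl opcode
  //   0x00                            unused
  //   being_initialized_entry_offset  bytes back to the patch template
  //   bytes_to_skip                   bytes of stub code to skip past
  //   _bytes_to_copy                  size of the copied original code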
  __ emit_int8((unsigned char)0xB8);
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
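  // The original instruction at _pc_start is now a jump to this stub; the
  // runtime patching code later restores (and patches) the copied bytes.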
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world. We cannot use fat nops here, since the concurrent
  // code rewrite may transiently create the illegal instruction sequence.
  for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
    __ nop();
  }
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5, true);
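  // args[i] now describes the location (register or stack slot) that
  // argument i occupies under the Java calling convention.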

  // push parameters
  // (src, src_pos, dst, dst_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // next registers will get stored on the stack
  for (int i = 0; i < 5; i++) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ movptr(Address(rsp, st_off), r[i]);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
                         relocInfo::static_call_type);
  __ call(resolve);
  ce->add_call_info_here(info());

#ifndef PRODUCT
  __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
#endif

  __ jmp(_continuation);
}

#undef __