1 /* 2 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "c1/c1_CodeStubs.hpp" 27 #include "c1/c1_FrameMap.hpp" 28 #include "c1/c1_LIRAssembler.hpp" 29 #include "c1/c1_MacroAssembler.hpp" 30 #include "c1/c1_Runtime1.hpp" 31 #include "nativeInst_x86.hpp" 32 #include "oops/objArrayKlass.hpp" 33 #include "runtime/sharedRuntime.hpp" 34 #include "utilities/align.hpp" 35 #include "utilities/macros.hpp" 36 #include "vmreg_x86.inline.hpp" 37 38 39 #define __ ce->masm()-> 40 41 float ConversionStub::float_zero = 0.0; 42 double ConversionStub::double_zero = 0.0; 43 44 void ConversionStub::emit_code(LIR_Assembler* ce) { 45 __ bind(_entry); 46 assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub"); 47 48 49 if (input()->is_single_xmm()) { 50 __ comiss(input()->as_xmm_float_reg(), 51 ExternalAddress((address)&float_zero)); 52 } else if (input()->is_double_xmm()) { 53 __ comisd(input()->as_xmm_double_reg(), 54 ExternalAddress((address)&double_zero)); 55 } else { 56 LP64_ONLY(ShouldNotReachHere()); 57 __ push(rax); 58 __ ftst(); 59 __ fnstsw_ax(); 60 __ sahf(); 61 __ pop(rax); 62 } 63 64 Label NaN, do_return; 65 __ jccb(Assembler::parity, NaN); 66 __ jccb(Assembler::below, do_return); 67 68 // input is > 0 -> return maxInt 69 // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff 70 __ decrement(result()->as_register()); 71 __ jmpb(do_return); 72 73 // input is NaN -> return 0 74 __ bind(NaN); 75 __ xorptr(result()->as_register(), result()->as_register()); 76 77 __ bind(do_return); 78 __ jmp(_continuation); 79 } 80 81 void CounterOverflowStub::emit_code(LIR_Assembler* ce) { 82 __ bind(_entry); 83 Metadata *m = _method->as_constant_ptr()->as_metadata(); 84 ce->store_parameter(m, 1); 85 ce->store_parameter(_bci, 0); 86 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id))); 87 ce->add_call_info_here(_info); 88 ce->verify_oop_map(_info); 89 __ jmp(_continuation); 90 } 91 92 RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array) 93 : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) { 94 assert(info != NULL, "must have info"); 95 _info = new CodeEmitInfo(info); 96 } 97 98 RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index) 99 : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) { 100 assert(info != NULL, "must have info"); 101 _info = new CodeEmitInfo(info); 102 } 103 104 void RangeCheckStub::emit_code(LIR_Assembler* ce) { 105 __ bind(_entry); 106 if (_info->deoptimize_on_exception()) { 107 address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id); 108 __ call(RuntimeAddress(a)); 109 ce->add_call_info_here(_info); 110 ce->verify_oop_map(_info); 111 debug_only(__ should_not_reach_here()); 112 return; 113 } 114 115 // pass the array index on stack because all registers must be preserved 116 if (_index->is_cpu_register()) { 117 ce->store_parameter(_index->as_register(), 0); 118 } else { 119 ce->store_parameter(_index->as_jint(), 0); 120 } 121 Runtime1::StubID stub_id; 122 if (_throw_index_out_of_bounds_exception) { 123 stub_id = Runtime1::throw_index_exception_id; 124 } else { 125 stub_id = Runtime1::throw_range_check_failed_id; 126 ce->store_parameter(_array->as_pointer_register(), 1); 127 } 128 __ call(RuntimeAddress(Runtime1::entry_for(stub_id))); 129 ce->add_call_info_here(_info); 130 ce->verify_oop_map(_info); 131 debug_only(__ should_not_reach_here()); 132 } 133 134 PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) { 135 _info = new CodeEmitInfo(info); 136 } 137 138 void PredicateFailedStub::emit_code(LIR_Assembler* ce) { 139 __ bind(_entry); 140 address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id); 141 __ call(RuntimeAddress(a)); 142 ce->add_call_info_here(_info); 143 ce->verify_oop_map(_info); 144 debug_only(__ should_not_reach_here()); 145 } 146 147 void DivByZeroStub::emit_code(LIR_Assembler* ce) { 148 if (_offset != -1) { 149 ce->compilation()->implicit_exception_table()->append(_offset, __ offset()); 150 } 151 __ bind(_entry); 152 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id))); 153 ce->add_call_info_here(_info); 154 debug_only(__ should_not_reach_here()); 155 } 156 157 158 // Implementation of LoadFlattenedArrayStub 159 160 LoadFlattenedArrayStub::LoadFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) { 161 _array = array; 162 _index = index; 163 _result = result; 164 // Tell the register allocator that the runtime call will scratch rax. 165 _scratch_reg = FrameMap::rax_oop_opr; 166 _info = new CodeEmitInfo(info); 167 } 168 169 void LoadFlattenedArrayStub::emit_code(LIR_Assembler* ce) { 170 assert(__ rsp_offset() == 0, "frame size should be fixed"); 171 __ bind(_entry); 172 ce->store_parameter(_array->as_register(), 1); 173 ce->store_parameter(_index->as_register(), 0); 174 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::load_flattened_array_id))); 175 ce->add_call_info_here(_info); 176 ce->verify_oop_map(_info); 177 if (_result->as_register() != rax) { 178 __ movptr(_result->as_register(), rax); 179 } 180 __ jmp(_continuation); 181 } 182 183 184 // Implementation of StoreFlattenedArrayStub 185 186 StoreFlattenedArrayStub::StoreFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr value, CodeEmitInfo* info) { 187 _array = array; 188 _index = index; 189 _value = value; 190 _info = new CodeEmitInfo(info); 191 } 192 193 194 void StoreFlattenedArrayStub::emit_code(LIR_Assembler* ce) { 195 assert(__ rsp_offset() == 0, "frame size should be fixed"); 196 __ bind(_entry); 197 ce->store_parameter(_array->as_register(), 2); 198 ce->store_parameter(_index->as_register(), 1); 199 ce->store_parameter(_value->as_register(), 0); 200 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::store_flattened_array_id))); 201 ce->add_call_info_here(_info); 202 ce->verify_oop_map(_info); 203 __ jmp(_continuation); 204 } 205 206 207 // Implementation of NewInstanceStub 208 209 NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) { 210 _result = result; 211 _klass = klass; 212 _klass_reg = klass_reg; 213 _info = new CodeEmitInfo(info); 214 assert(stub_id == Runtime1::new_instance_id || 215 stub_id == Runtime1::fast_new_instance_id || 216 stub_id == Runtime1::fast_new_instance_init_check_id, 217 "need new_instance id"); 218 _stub_id = stub_id; 219 } 220 221 222 void NewInstanceStub::emit_code(LIR_Assembler* ce) { 223 assert(__ rsp_offset() == 0, "frame size should be fixed"); 224 __ bind(_entry); 225 __ movptr(rdx, _klass_reg->as_register()); 226 __ call(RuntimeAddress(Runtime1::entry_for(_stub_id))); 227 ce->add_call_info_here(_info); 228 ce->verify_oop_map(_info); 229 assert(_result->as_register() == rax, "result must in rax,"); 230 __ jmp(_continuation); 231 } 232 233 234 // Implementation of NewTypeArrayStub 235 236 NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) { 237 _klass_reg = klass_reg; 238 _length = length; 239 _result = result; 240 _info = new CodeEmitInfo(info); 241 } 242 243 244 void NewTypeArrayStub::emit_code(LIR_Assembler* ce) { 245 assert(__ rsp_offset() == 0, "frame size should be fixed"); 246 __ bind(_entry); 247 assert(_length->as_register() == rbx, "length must in rbx,"); 248 assert(_klass_reg->as_register() == rdx, "klass_reg must in rdx"); 249 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id))); 250 ce->add_call_info_here(_info); 251 ce->verify_oop_map(_info); 252 assert(_result->as_register() == rax, "result must in rax,"); 253 __ jmp(_continuation); 254 } 255 256 257 // Implementation of NewObjectArrayStub 258 259 NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, 260 CodeEmitInfo* info, bool is_value_type) { 261 _klass_reg = klass_reg; 262 _result = result; 263 _length = length; 264 _info = new CodeEmitInfo(info); 265 _is_value_type = is_value_type; 266 } 267 268 269 void NewObjectArrayStub::emit_code(LIR_Assembler* ce) { 270 assert(__ rsp_offset() == 0, "frame size should be fixed"); 271 __ bind(_entry); 272 assert(_length->as_register() == rbx, "length must in rbx,"); 273 assert(_klass_reg->as_register() == rdx, "klass_reg must in rdx"); 274 if (_is_value_type) { 275 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_value_array_id))); 276 } else { 277 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id))); 278 } 279 ce->add_call_info_here(_info); 280 ce->verify_oop_map(_info); 281 assert(_result->as_register() == rax, "result must in rax,"); 282 __ jmp(_continuation); 283 } 284 285 286 // Implementation of MonitorAccessStubs 287 288 MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info, CodeStub* throw_imse_stub, LIR_Opr scratch_reg) 289 : MonitorAccessStub(obj_reg, lock_reg) 290 { 291 _info = new CodeEmitInfo(info); 292 _throw_imse_stub = throw_imse_stub; 293 _scratch_reg = scratch_reg; 294 if (_throw_imse_stub != NULL) { 295 assert(_scratch_reg != LIR_OprFact::illegalOpr, "must be"); 296 } 297 } 298 299 300 void MonitorEnterStub::emit_code(LIR_Assembler* ce) { 301 assert(__ rsp_offset() == 0, "frame size should be fixed"); 302 __ bind(_entry); 303 if (_throw_imse_stub != NULL) { 304 // When we come here, _obj_reg has already been checked to be non-null. 305 Register mark = _scratch_reg->as_register(); 306 __ movptr(mark, Address(_obj_reg->as_register(), oopDesc::mark_offset_in_bytes())); 307 __ testl(mark, markOopDesc::always_locked_pattern); 308 __ jcc(Assembler::notZero, *_throw_imse_stub->entry()); 309 } 310 ce->store_parameter(_obj_reg->as_register(), 1); 311 ce->store_parameter(_lock_reg->as_register(), 0); 312 Runtime1::StubID enter_id; 313 if (ce->compilation()->has_fpu_code()) { 314 enter_id = Runtime1::monitorenter_id; 315 } else { 316 enter_id = Runtime1::monitorenter_nofpu_id; 317 } 318 __ call(RuntimeAddress(Runtime1::entry_for(enter_id))); 319 ce->add_call_info_here(_info); 320 ce->verify_oop_map(_info); 321 __ jmp(_continuation); 322 } 323 324 325 void MonitorExitStub::emit_code(LIR_Assembler* ce) { 326 __ bind(_entry); 327 if (_compute_lock) { 328 // lock_reg was destroyed by fast unlocking attempt => recompute it 329 ce->monitor_address(_monitor_ix, _lock_reg); 330 } 331 ce->store_parameter(_lock_reg->as_register(), 0); 332 // note: non-blocking leaf routine => no call info needed 333 Runtime1::StubID exit_id; 334 if (ce->compilation()->has_fpu_code()) { 335 exit_id = Runtime1::monitorexit_id; 336 } else { 337 exit_id = Runtime1::monitorexit_nofpu_id; 338 } 339 __ call(RuntimeAddress(Runtime1::entry_for(exit_id))); 340 __ jmp(_continuation); 341 } 342 343 344 // Implementation of patching: 345 // - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes) 346 // - Replace original code with a call to the stub 347 // At Runtime: 348 // - call to stub, jump to runtime 349 // - in runtime: preserve all registers (rspecially objects, i.e., source and destination object) 350 // - in runtime: after initializing class, restore original code, reexecute instruction 351 352 int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size; 353 354 void PatchingStub::align_patch_site(MacroAssembler* masm) { 355 // We're patching a 5-7 byte instruction on intel and we need to 356 // make sure that we don't see a piece of the instruction. It 357 // appears mostly impossible on Intel to simply invalidate other 358 // processors caches and since they may do aggressive prefetch it's 359 // very hard to make a guess about what code might be in the icache. 360 // Force the instruction to be double word aligned so that it 361 // doesn't span a cache line. 362 masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize)); 363 } 364 365 void PatchingStub::emit_code(LIR_Assembler* ce) { 366 assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call"); 367 368 Label call_patch; 369 370 // static field accesses have special semantics while the class 371 // initializer is being run so we emit a test which can be used to 372 // check that this code is being executed by the initializing 373 // thread. 374 address being_initialized_entry = __ pc(); 375 if (CommentedAssembly) { 376 __ block_comment(" patch template"); 377 } 378 if (_id == load_klass_id) { 379 // produce a copy of the load klass instruction for use by the being initialized case 380 #ifdef ASSERT 381 address start = __ pc(); 382 #endif 383 Metadata* o = NULL; 384 __ mov_metadata(_obj, o); 385 #ifdef ASSERT 386 for (int i = 0; i < _bytes_to_copy; i++) { 387 address ptr = (address)(_pc_start + i); 388 int a_byte = (*ptr) & 0xFF; 389 assert(a_byte == *start++, "should be the same code"); 390 } 391 #endif 392 } else if (_id == load_mirror_id) { 393 // produce a copy of the load mirror instruction for use by the being 394 // initialized case 395 #ifdef ASSERT 396 address start = __ pc(); 397 #endif 398 jobject o = NULL; 399 __ movoop(_obj, o); 400 #ifdef ASSERT 401 for (int i = 0; i < _bytes_to_copy; i++) { 402 address ptr = (address)(_pc_start + i); 403 int a_byte = (*ptr) & 0xFF; 404 assert(a_byte == *start++, "should be the same code"); 405 } 406 #endif 407 } else { 408 // make a copy the code which is going to be patched. 409 for (int i = 0; i < _bytes_to_copy; i++) { 410 address ptr = (address)(_pc_start + i); 411 int a_byte = (*ptr) & 0xFF; 412 __ emit_int8(a_byte); 413 *ptr = 0x90; // make the site look like a nop 414 } 415 } 416 417 address end_of_patch = __ pc(); 418 int bytes_to_skip = 0; 419 if (_id == load_mirror_id) { 420 int offset = __ offset(); 421 if (CommentedAssembly) { 422 __ block_comment(" being_initialized check"); 423 } 424 assert(_obj != noreg, "must be a valid register"); 425 Register tmp = rax; 426 Register tmp2 = rbx; 427 __ push(tmp); 428 __ push(tmp2); 429 // Load without verification to keep code size small. We need it because 430 // begin_initialized_entry_offset has to fit in a byte. Also, we know it's not null. 431 __ movptr(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes())); 432 __ get_thread(tmp); 433 __ cmpptr(tmp, Address(tmp2, InstanceKlass::init_thread_offset())); 434 __ pop(tmp2); 435 __ pop(tmp); 436 __ jcc(Assembler::notEqual, call_patch); 437 438 // access_field patches may execute the patched code before it's 439 // copied back into place so we need to jump back into the main 440 // code of the nmethod to continue execution. 441 __ jmp(_patch_site_continuation); 442 443 // make sure this extra code gets skipped 444 bytes_to_skip += __ offset() - offset; 445 } 446 if (CommentedAssembly) { 447 __ block_comment("patch data encoded as movl"); 448 } 449 // Now emit the patch record telling the runtime how to find the 450 // pieces of the patch. We only need 3 bytes but for readability of 451 // the disassembly we make the data look like a movl reg, imm32, 452 // which requires 5 bytes 453 int sizeof_patch_record = 5; 454 bytes_to_skip += sizeof_patch_record; 455 456 // emit the offsets needed to find the code to patch 457 int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record; 458 459 __ emit_int8((unsigned char)0xB8); 460 __ emit_int8(0); 461 __ emit_int8(being_initialized_entry_offset); 462 __ emit_int8(bytes_to_skip); 463 __ emit_int8(_bytes_to_copy); 464 address patch_info_pc = __ pc(); 465 assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info"); 466 467 address entry = __ pc(); 468 NativeGeneralJump::insert_unconditional((address)_pc_start, entry); 469 address target = NULL; 470 relocInfo::relocType reloc_type = relocInfo::none; 471 switch (_id) { 472 case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break; 473 case load_klass_id: target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break; 474 case load_mirror_id: target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break; 475 case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break; 476 default: ShouldNotReachHere(); 477 } 478 __ bind(call_patch); 479 480 if (CommentedAssembly) { 481 __ block_comment("patch entry point"); 482 } 483 __ call(RuntimeAddress(target)); 484 assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change"); 485 ce->add_call_info_here(_info); 486 int jmp_off = __ offset(); 487 __ jmp(_patch_site_entry); 488 // Add enough nops so deoptimization can overwrite the jmp above with a call 489 // and not destroy the world. We cannot use fat nops here, since the concurrent 490 // code rewrite may transiently create the illegal instruction sequence. 491 for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) { 492 __ nop(); 493 } 494 if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) { 495 CodeSection* cs = __ code_section(); 496 RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1)); 497 relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none); 498 } 499 } 500 501 502 void DeoptimizeStub::emit_code(LIR_Assembler* ce) { 503 __ bind(_entry); 504 ce->store_parameter(_trap_request, 0); 505 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id))); 506 ce->add_call_info_here(_info); 507 DEBUG_ONLY(__ should_not_reach_here()); 508 } 509 510 511 void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) { 512 address a; 513 if (_info->deoptimize_on_exception()) { 514 // Deoptimize, do not throw the exception, because it is probably wrong to do it here. 515 a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id); 516 } else { 517 a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id); 518 } 519 520 ce->compilation()->implicit_exception_table()->append(_offset, __ offset()); 521 __ bind(_entry); 522 __ call(RuntimeAddress(a)); 523 ce->add_call_info_here(_info); 524 ce->verify_oop_map(_info); 525 debug_only(__ should_not_reach_here()); 526 } 527 528 529 void SimpleExceptionStub::emit_code(LIR_Assembler* ce) { 530 assert(__ rsp_offset() == 0, "frame size should be fixed"); 531 532 __ bind(_entry); 533 // pass the object on stack because all registers must be preserved 534 if (_obj->is_cpu_register()) { 535 ce->store_parameter(_obj->as_register(), 0); 536 } 537 __ call(RuntimeAddress(Runtime1::entry_for(_stub))); 538 ce->add_call_info_here(_info); 539 debug_only(__ should_not_reach_here()); 540 } 541 542 543 void ArrayCopyStub::emit_code(LIR_Assembler* ce) { 544 //---------------slow case: call to native----------------- 545 __ bind(_entry); 546 // Figure out where the args should go 547 // This should really convert the IntrinsicID to the Method* and signature 548 // but I don't know how to do that. 549 // 550 VMRegPair args[5]; 551 BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT}; 552 SharedRuntime::java_calling_convention(signature, args, 5, true); 553 554 // push parameters 555 // (src, src_pos, dest, destPos, length) 556 Register r[5]; 557 r[0] = src()->as_register(); 558 r[1] = src_pos()->as_register(); 559 r[2] = dst()->as_register(); 560 r[3] = dst_pos()->as_register(); 561 r[4] = length()->as_register(); 562 563 // next registers will get stored on the stack 564 for (int i = 0; i < 5 ; i++ ) { 565 VMReg r_1 = args[i].first(); 566 if (r_1->is_stack()) { 567 int st_off = r_1->reg2stack() * wordSize; 568 __ movptr (Address(rsp, st_off), r[i]); 569 } else { 570 assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg "); 571 } 572 } 573 574 ce->align_call(lir_static_call); 575 576 ce->emit_static_call_stub(); 577 if (ce->compilation()->bailed_out()) { 578 return; // CodeCache is full 579 } 580 AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(), 581 relocInfo::static_call_type); 582 __ call(resolve); 583 ce->add_call_info_here(info()); 584 585 #ifndef PRODUCT 586 __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt)); 587 #endif 588 589 __ jmp(_continuation); 590 } 591 592 #undef __