/*
 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
# include "vmreg_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "nativeInst_aarch64.hpp"
# include "vmreg_aarch64.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
# include "vmreg_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
# include "vmreg_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
# include "vmreg_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
# include "vmreg_ppc.inline.hpp"
#endif


void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // we must have enough patching space so that a call can be inserted
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
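    // A load_appendix patch should only arise at a call bytecode that can
    // carry a resolved appendix argument (invokedynamic-style call sites);
    // any other bytecode here would mean the patching site was classified
    // incorrectly.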
resolution"); 103 } else { 104 ShouldNotReachHere(); 105 } 106 #endif 107 } 108 109 PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) { 110 IRScope* scope = info->scope(); 111 Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci()); 112 if (Bytecodes::has_optional_appendix(bc_raw)) { 113 return PatchingStub::load_appendix_id; 114 } 115 return PatchingStub::load_mirror_id; 116 } 117 118 //--------------------------------------------------------------- 119 120 121 LIR_Assembler::LIR_Assembler(Compilation* c): 122 _compilation(c) 123 , _masm(c->masm()) 124 , _bs(Universe::heap()->barrier_set()) 125 , _frame_map(c->frame_map()) 126 , _current_block(NULL) 127 , _pending_non_safepoint(NULL) 128 , _pending_non_safepoint_offset(0) 129 { 130 _slow_case_stubs = new CodeStubList(); 131 #ifdef TARGET_ARCH_aarch64 132 init(); // Target-dependent initialization 133 #endif 134 } 135 136 137 LIR_Assembler::~LIR_Assembler() { 138 // The unwind handler label may be unbound if this destructor is invoked because of a bail-out. 139 // Reset it here to avoid an assertion. 140 _unwind_handler_entry.reset(); 141 } 142 143 144 void LIR_Assembler::check_codespace() { 145 CodeSection* cs = _masm->code_section(); 146 if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) { 147 BAILOUT("CodeBuffer overflow"); 148 } 149 } 150 151 152 void LIR_Assembler::append_code_stub(CodeStub* stub) { 153 _slow_case_stubs->append(stub); 154 } 155 156 void LIR_Assembler::emit_stubs(CodeStubList* stub_list) { 157 for (int m = 0; m < stub_list->length(); m++) { 158 CodeStub* s = (*stub_list)[m]; 159 160 check_codespace(); 161 CHECK_BAILOUT(); 162 163 #ifndef PRODUCT 164 if (CommentedAssembly) { 165 stringStream st; 166 s->print_name(&st); 167 st.print(" slow case"); 168 _masm->block_comment(st.as_string()); 169 } 170 #endif 171 s->emit_code(this); 172 #ifdef ASSERT 173 #ifndef AARCH64 174 s->assert_no_unbound_labels(); 175 #endif 176 #endif 177 } 178 } 179 180 181 void LIR_Assembler::emit_slow_case_stubs() { 182 emit_stubs(_slow_case_stubs); 183 } 184 185 186 bool LIR_Assembler::needs_icache(ciMethod* method) const { 187 return !method->is_static(); 188 } 189 190 191 int LIR_Assembler::code_offset() const { 192 return _masm->offset(); 193 } 194 195 196 address LIR_Assembler::pc() const { 197 return _masm->pc(); 198 } 199 200 // To bang the stack of this compiled method we use the stack size 201 // that the interpreter would need in case of a deoptimization. This 202 // removes the need to bang the stack in the deoptimization blob which 203 // in turn simplifies stack overflow handling. 
int LIR_Assembler::bang_size_in_bytes() const {
  return MAX2(initial_frame_size_in_bytes(), _compilation->interpreter_frame_size());
}

void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phi's
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}


void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print out every op since that's too verbose.  Print
      // branches since they include block and stub names.  Also print
      // patching moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  _masm->code_section()->relocate(pc(), relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}

void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL) return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL) return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info.  Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Return NULL if n is too large.
// Returns the caller_bci for the next-younger state, also.
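// For example, given an inlining chain A -> B -> C with s == C's state:
// n == 0 returns A's (oldest) state, n == 1 returns B's, n == 2 returns
// C's, and n == 3 returns NULL.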
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL) break;
    t = t->caller_state();
  }
  if (t == NULL) return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL) return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}

void LIR_Assembler::record_non_safepoint_debug_info() {
  int pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack = debug_info(_pending_non_safepoint);
  int bci = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL) break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    debug_info->describe_scope(pc_offset, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  add_debug_info_for_null_check(code_offset(), cinfo);
}

void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}


void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align call sites, otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub out of line
  emit_static_call_stub();
  CHECK_BAILOUT();

  switch (op->code()) {
  case lir_static_call:
  case lir_dynamic_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  case lir_virtual_call:
    vtable_call(op);
    break;
  default:
    fatal(err_msg_res("unexpected op code: %s", op->name()));
    break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
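  // (The flag ends up on the resulting nmethod; stack walking consults it
  // because frames at MethodHandle call sites may have a nonstandard
  // layout.)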
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it up here.
  if (UseSSE < 2) {
    int i;
    for ( i = 1; i <= 7 ; i++ ) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind (*(op->label()));
}


void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        add_debug_info_for_null_check_here(op->info());

        if (op->in_opr()->is_single_cpu()) {
          _masm->null_check(op->in_opr()->as_register());
        } else {
          Unimplemented();
        }
      }
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
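      // Verified_Entry is the entry point callers use once any required
      // receiver class check has been performed; Frame_Complete below marks
      // the first pc at which the new frame is fully set up.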
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}


void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_sin:
    case lir_tan:
    case lir_cos:
    case lir_log:
    case lir_log10:
    case lir_exp:
    case lir_pow:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
}


void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");
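  // Rounding happens as a side effect of the store: spilling the (wider)
  // FPU register into a stack slot of the destination's size narrows the
  // value to the precision Java requires.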
  reg2stack (src, dest, src->type(), pop_fpu_stack);
}


void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOops) {
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
  }
#endif
}